| code<br>(string, lengths 82–53.2k) | code_codestyle<br>(int64, 0–721) | style_context<br>(string, lengths 91–41.9k) | style_context_codestyle<br>(int64, 0–699) | label<br>(int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num, via the Sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
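# --- Illustrative check (not part of the original dataset row; a minimal sketch,
# assuming the restored names above) ---
#
#     >>> prime_sieve(10)
#     [2, 3, 5, 7]
#     >>> prime_sieve(2)
#     [2]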
| 585
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects keyed by val, with an index map so decrease_key is O(log n)."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# Example usage
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 585
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
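# --- Illustrative usage (not part of the original dataset row; a minimal sketch,
# following directly from the defaults above) ---
#
#     >>> config = ConvNextV2Config()
#     >>> config.hidden_sizes
#     [96, 192, 384, 768]
#     >>> config.stage_names
#     ['stem', 'stage1', 'stage2', 'stage3', 'stage4']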
| 712
|
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose number of contained rectangles is closest
    to `target` (Project Euler problem 85).
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
| 80
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27
|
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 27
| 1
|
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
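# --- Illustrative usage (not part of the original dataset row; a minimal sketch) ---
# With the example graphs above, the meet-in-the-middle search finds
# E -> G -> F (cost 2 + 1 = 3) rather than E -> B -> C -> D -> F (cost 4):
#
#     >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#     3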
| 639
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
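# --- Illustrative invocation (a sketch, not part of the original dataset row;
# the script filename and flag spellings are assumptions inferred from the
# dataclass fields and output filenames above) ---
#
#   python run_mlm_wwm.py \
#       --model_name_or_path roberta-base \
#       --dataset_name wikitext \
#       --dataset_config_name wikitext-2-raw-v1 \
#       --do_train \
#       --do_eval \
#       --output_dir /tmp/test-mlm-wwm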
| 639
| 1
|
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    # note: the target key below is reconstructed from the upstream conversion
    # script, which aliases the prompt encoder's positional embedding
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Hub repository that hosts the original checkpoints",
    )
    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 430
|
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 430
| 1
|
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors;
            # target key names follow the standard Swin conversion pattern
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 704
|
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 data format, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 105
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
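# --- Illustrative usage (not part of the original dataset row; a minimal sketch,
# reading off the defaults defined above) ---
#
#     >>> config = BioGptConfig()
#     >>> config.vocab_size, config.hidden_size, config.num_hidden_layers
#     (42384, 1024, 24)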
| 122
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
def __init__( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple=1_3 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[str]=2_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : int=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : int=1_0 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=None , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Union[str, Any]=2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Optional[int] = max_length
_lowerCamelCase : List[Any] = num_mel_bins
_lowerCamelCase : int = is_training
_lowerCamelCase : Union[str, Any] = use_labels
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : List[str] = scope
_lowerCamelCase : Optional[int] = frequency_stride
_lowerCamelCase : List[Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase : Union[str, Any] = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase : Any = frequency_out_dimension * time_out_dimension
_lowerCamelCase : List[Any] = num_patches + 2
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def SCREAMING_SNAKE_CASE ( self , config , input_values , labels ):
        """simple docstring"""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_values , labels = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
snake_case__ : Any = False
snake_case__ : List[Any] = False
snake_case__ : Optional[Any] = False
snake_case__ : Optional[Any] = False
    def SCREAMING_SNAKE_CASE ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def snake_case_ ( ):
'''simple docstring'''
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
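# Hedged usage sketch of the same inference flow outside the test harness.
# The checkpoint id mirrors the integration test above; the helper name and
# the printed top class are illustrative, not guaranteed output.
def run_ast_inference_example():
    import torch
    from transformers import ASTFeatureExtractor, ASTForAudioClassification

    extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
    model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
    audio , sampling_rate = prepare_audio()  # helper defined above
    inputs = extractor(audio.squeeze().numpy() , sampling_rate=sampling_rate , return_tensors="pt" )
    with torch.no_grad():
        logits = model(**inputs ).logits  # shape (1, 527): one score per AudioSet class
    return model.config.id2label[int(logits.argmax(-1 ) )]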
| 83
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
a_ = logging.get_logger(__name__)
a_ = Dict[str, Any]
a_ = List[Prediction]
@add_end_docstrings(snake_case )
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
def __init__( self: Any , *__lowerCAmelCase: Tuple , **__lowerCAmelCase: Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
def __call__( self: Optional[Any] , *__lowerCAmelCase: Union[str, Any] , **__lowerCAmelCase: Optional[int] ) -> Union[Predictions, List[Prediction]]:
'''simple docstring'''
return super().__call__(*__lowerCAmelCase , **__lowerCAmelCase )
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ] ) )

            scores , classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = box.int().tolist()
__UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
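# Minimal usage sketch for the pipeline above. The checkpoint id is an
# assumption for illustration; any object-detection model (e.g. DETR)
# loadable through the pipeline factory should behave the same way.
def run_object_detection_example():
    from transformers import pipeline

    detector = pipeline("object-detection" , model="facebook/detr-resnet-50" )
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    for pred in detector(url , threshold=0.9 ):
        # each prediction is a dict with "score", "label" and a pixel-space "box"
        print(f"{pred['label']:>10} {pred['score']:.3f} {pred['box']}" )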
| 710
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : int = DiTPipeline
lowerCAmelCase__ : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCAmelCase__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
lowerCAmelCase__ : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCAmelCase__ : Optional[int] = False
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        transformer = TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference( self ):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dit_256( self ):
        '''simple docstring'''
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
        pipe.to("cuda" )
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words )
        images = pipe(ids , generator=generator , num_inference_steps=40 , output_type="np" ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2
    def test_dit_512( self ):
        '''simple docstring'''
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to("cuda" )
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(ids , generator=generator , num_inference_steps=25 , output_type="np" ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
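# Condensed, hedged usage sketch of the class-conditional generation the slow
# tests above exercise; the checkpoint ids match the tests, while the helper
# name and output file name are assumptions for illustration.
def run_dit_example():
    import torch
    from diffusers import DiTPipeline, DPMSolverMultistepScheduler

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
    pipe.to("cuda" )
    # get_label_ids maps human-readable ImageNet class names to label indices
    class_ids = pipe.get_label_ids(["white shark", "umbrella"] )
    generator = torch.manual_seed(0 )
    images = pipe(class_labels=class_ids , generator=generator , num_inference_steps=25 ).images
    images[0].save("dit_sample.png" )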
| 286
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_speech_to_text"""] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_speech_to_text"""] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
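# The lazy-import pattern above defers heavy submodule imports until an
# attribute is first accessed. A minimal, illustrative sketch of the idea
# (simplified from transformers' _LazyModule; names here are assumptions):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # invert {submodule: [objects]} into {object: submodule}
#             self._object_to_module = {
#                 obj: mod for mod, objs in import_structure.items() for obj in objs
#             }
#
#         def __getattr__(self, attr):
#             if attr not in self._object_to_module:
#                 raise AttributeError(f"module {self.__name__} has no attribute {attr}")
#             module = importlib.import_module("." + self._object_to_module[attr], self.__name__)
#             return getattr(module, attr)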
| 162
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_a: int = """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
_a: Union[str, Any] = """
{0} = None
"""
_a: Tuple = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
_a: Tuple = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line ):
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("else:" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name , backend_name ):
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
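def _demo_create_dummy_object():
    # For a class-cased name and the "torch" backend this hits the DUMMY_CLASS
    # branch and renders a placeholder class that raises on use; upper-case
    # names render DUMMY_CONSTANT and lower-case ones DUMMY_FUNCTION. The
    # inputs and this helper's name are illustrative assumptions.
    print(create_dummy_object("StableDiffusionPipeline" , '["torch"]' ) )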
def create_dummy_files(backend_specific_objects=None ):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False ):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , "utils" )
    dummy_file_paths = {
        backend: os.path.join(path , F"dummy_{short_names.get(backend , backend )}_objects.py" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , "r" , encoding="utf-8" , newline="\n" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main "
                    "__init__ has new objects." )
                with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    F"diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` "
                    "to fix this." )
if __name__ == "__main__":
_a: Dict = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_a: Any = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 162
| 1
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Any = AutoencoderKL
_UpperCamelCase : str = """sample"""
_UpperCamelCase : Any = 1e-2
@property
def SCREAMING_SNAKE_CASE__ ( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return (3, 32, 32)
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
lowercase = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def SCREAMING_SNAKE_CASE__ ( self ):
# enable deterministic behavior for gradient checkpointing
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
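    def _gradient_checkpointing_sketch(self ):
        # Tiny self-contained illustration (an assumption added for clarity,
        # not part of the suite) of the property the test above relies on:
        # recomputing activations in the backward pass yields the same
        # gradients as storing them, at the cost of extra compute.
        from torch.utils.checkpoint import checkpoint

        layer = torch.nn.Linear(8 , 8 )
        x = torch.randn(2 , 8 , requires_grad=True )
        g_plain = torch.autograd.grad(layer(x ).sum() , x , retain_graph=True )[0]
        g_ckpt = torch.autograd.grad(checkpoint(layer , x , use_reentrant=False ).sum() , x )[0]
        assert torch.allclose(g_plain , g_ckpt )  # identical gradients, less saved memory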
def SCREAMING_SNAKE_CASE__ ( self ):
        model , loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE__ ( self ):
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
lowercase = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowercase = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
@slow
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format( self , seed , shape ):
        return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
def SCREAMING_SNAKE_CASE__ ( self , snake_case="CompVis/stable-diffusion-v1-4" , snake_case=False ):
lowercase = 'fp16' if fpaa else None
lowercase = torch.floataa if fpaa else torch.floataa
lowercase = AutoencoderKL.from_pretrained(
snake_case , subfolder='vae' , torch_dtype=snake_case , revision=snake_case , )
model.to(snake_case ).eval()
return model
    def get_generator( self , seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion( self , seed , expected_slice , expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = self.get_sd_vae_model(fpaa=snake_case )
lowercase = self.get_sd_image(snake_case , fpaa=snake_case )
lowercase = self.get_generator(snake_case )
with torch.no_grad():
lowercase = model(snake_case , generator=snake_case , sample_posterior=snake_case ).sample
assert sample.shape == image.shape
lowercase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase = torch.tensor(snake_case )
assert torch_all_close(snake_case , snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = self.get_sd_vae_model()
lowercase = self.get_sd_image(snake_case )
with torch.no_grad():
lowercase = model(snake_case ).sample
assert sample.shape == image.shape
lowercase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(snake_case , snake_case , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = self.get_sd_vae_model()
lowercase = self.get_sd_image(snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase = model.decode(snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase = torch.tensor(snake_case )
assert torch_all_close(snake_case , snake_case , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = self.get_sd_vae_model(fpaa=snake_case )
lowercase = self.get_sd_image(snake_case , shape=(3, 4, 64, 64) , fpaa=snake_case )
with torch.no_grad():
lowercase = model.decode(snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase = torch.tensor(snake_case )
assert torch_all_close(snake_case , snake_case , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.get_sd_vae_model(fpaa=snake_case )
lowercase = self.get_sd_image(snake_case , shape=(3, 4, 64, 64) , fpaa=snake_case )
with torch.no_grad():
lowercase = model.decode(snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase = model.decode(snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(snake_case , snake_case , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.get_sd_vae_model()
lowercase = self.get_sd_image(snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase = model.decode(snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase = model.decode(snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(snake_case , snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = self.get_sd_vae_model()
lowercase = self.get_sd_image(snake_case )
lowercase = self.get_generator(snake_case )
with torch.no_grad():
lowercase = model.encode(snake_case ).latent_dist
lowercase = dist.sample(generator=snake_case )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase = torch.tensor(snake_case )
lowercase = 3E-3 if torch_device != 'mps' else 1E-2
assert torch_all_close(snake_case , snake_case , atol=snake_case )
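# Hedged sketch of the encode→sample→decode roundtrip the tests above
# exercise, against the same SD v1.4 VAE subfolder. The (1, 4, 64, 64)
# latent shape follows from the VAE's 8x spatial downsampling; the helper
# name is an assumption for illustration.
def run_vae_roundtrip_example():
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4" , subfolder="vae" ).eval()
    image = torch.randn(1 , 3 , 512 , 512 )
    with torch.no_grad():
        latents = vae.encode(image ).latent_dist.sample()  # (1, 4, 64, 64)
        recon = vae.decode(latents ).sample                # (1, 3, 512, 512)
    return latents.shape , recon.shape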
| 565
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 565
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def __snake_case( self ):
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __snake_case( self ):
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
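# Hedged exploration sketch: what the tokenizer returns for a small metadata
# dict. Jukebox has three priors, so "input_ids" holds one tensor per prior;
# only the top prior conditions on the full lyrics, which is why the lower
# priors' sequences in the expected outputs above are so short. The helper
# name and the shortened lyrics are illustrative assumptions.
def run_jukebox_tokenizer_example():
    from transformers import JukeboxTokenizer

    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
    metas = {"artist": "Zac Brown Band" , "genres": "Country" , "lyrics": "I met a traveller"}
    tokens = tokenizer(**metas )["input_ids"]
    return [tuple(ids.shape ) for ids in tokens]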
| 643
|
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str , to_type: str , value: float ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'''Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n'''
            f'''Valid values are: {", ".join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
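def _demo_energy_conversion():
    # A few conversions through the joule pivot; the values follow directly
    # from the table above. This demo helper is an illustrative addition.
    assert energy_conversion("joule" , "kilojoule" , 1_000 ) == 1.0
    assert energy_conversion("kilowatthour" , "joule" , 1 ) == 3_600_000.0
    assert energy_conversion("kilocalorie_nutr" , "kilojoule" , 1 ) == 4_186.8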
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 1
|
'''simple docstring'''
def solution(limit: int = 1_000_000 ) -> int:
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 1_0 )
    return count
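def _check_progression_identity():
    # The loop above encodes the identity behind the search: with
    # x = a + d, y = a, z = a - d (a = first_term, d = common_difference),
    # x**2 - y**2 - z**2 simplifies to a * (4*d - a) = n, hence
    # d = (a + n / a) / 4. A quick numeric spot check (values illustrative):
    a , d = 10 , 4
    n = a * (4 * d - a )  # 60
    assert (a + d) ** 2 - a**2 - (a - d) ** 2 == n
    assert (a + n / a ) / 4 == d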
if __name__ == "__main__":
print(F'''{solution() = }''')
| 123
|
'''simple docstring'''
def solution(length: int = 5_0 ) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
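def _check_small_case():
    # Sanity check against the worked example quoted in Project Euler 114:
    # a row of length seven admits exactly seventeen arrangements.
    assert solution(7 ) == 17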
if __name__ == "__main__":
print(F'''{solution() = }''')
| 123
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self , **kwargs ):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Name of TPU'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Benchmark models in eager model.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
    def _setup_tpu( self ):
'''simple docstring'''
requires_backends(self , ["""tf"""] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self ):
'''simple docstring'''
requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
        return strategy
@property
    def is_tpu( self ):
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
    def strategy( self ):
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
    def gpu_list( self ):
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
    def n_gpu( self ):
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ):
'''simple docstring'''
return self.n_gpu > 0
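# Hedged usage sketch: how a caller would consume the resolved strategy.
# TensorFlowBenchmarkArguments is the upstream name of the class above; the
# constructor arguments and helper name are assumptions for illustration.
def run_benchmark_args_example():
    from transformers import TensorFlowBenchmarkArguments

    args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"] , batch_sizes=[1] , sequence_lengths=[8] )
    with args.strategy.scope():
        # model construction and benchmarking run under the chosen device strategy
        print("replicas:" , args.strategy.num_replicas_in_sync )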
| 379
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__UpperCAmelCase = """CompVis/stable-diffusion-v1-1"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-2"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-3"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-4"""
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : AutoencoderKL , lowerCamelCase_ : CLIPTextModel , lowerCamelCase_ : CLIPTokenizer , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ : StableDiffusionSafetyChecker , lowerCamelCase_ : CLIPImageProcessor , lowerCamelCase_ : bool = True , ):
'''simple docstring'''
        super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPipeline(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , requires_safety_checker=lowerCamelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
    def layers( self ) -> Dict[str, Any]:
        '''simple docstring'''
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("""_""" )}
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : str , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(lowerCamelCase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
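        # (clarifying note: Stable Diffusion's VAE downsamples by a factor of 8, so
        # latent shapes only work out when height and width are multiples of 8)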
# Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
        # Get second result from Stable Diffusion Checkpoint v1.2
        res2 = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
        # Get third result from Stable Diffusion Checkpoint v1.3
        res3 = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
        # Get fourth result from Stable Diffusion Checkpoint v1.4
        res4 = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
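# A usage sketch for the checkpoint-comparison pipeline above (hedged: the pipeline
# class name and model id below are illustrative assumptions, not taken from this file):
#
#     pipe = StableDiffusionComparisonPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#     output = pipe("an astronaut riding a horse", num_inference_steps=50)
#     # output.images holds one image per checkpoint, v1.1 through v1.4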
| 379
| 1
|
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
def __init__(self : Optional[Any] , __a : Tuple , __a : List[Any]=2 , __a : Tuple=3 , __a : Union[str, Any]=4 , __a : List[Any]=2 , __a : List[str]=7 , __a : str=True , __a : List[Any]=True , __a : str=True , __a : Union[str, Any]=True , __a : Dict=99 , __a : Union[str, Any]=36 , __a : Dict=3 , __a : int=4 , __a : Tuple=37 , __a : Union[str, Any]="gelu" , __a : int=0.1 , __a : Optional[Any]=0.1 , __a : int=512 , __a : List[Any]=16 , __a : Any=2 , __a : Dict=0.02 , __a : List[Any]=6 , __a : Union[str, Any]=6 , __a : Tuple=3 , __a : Tuple=4 , __a : Optional[Any]=None , __a : str=1000 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = text_seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = coordinate_size
UpperCAmelCase_ = shape_size
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase_ = text_seq_length
UpperCAmelCase_ = (image_size // patch_size) ** 2 + 1
UpperCAmelCase_ = self.text_seq_length + self.image_seq_length
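        # Worked example with the defaults above: image_size=4, patch_size=2 and
        # text_seq_length=7 give (4 // 2) ** 2 + 1 = 5 image tokens, so seq_length = 7 + 5 = 12.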
def _lowercase (self : str ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ = bbox[i, j, 3]
UpperCAmelCase_ = bbox[i, j, 1]
UpperCAmelCase_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ = bbox[i, j, 2]
UpperCAmelCase_ = bbox[i, j, 0]
UpperCAmelCase_ = t
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowercase (self : Any , __a : Optional[Any] , __a : Optional[Any] , __a : List[str] , __a : List[str] , __a : int , __a : Optional[Any] , __a : Tuple , __a : Tuple ):
UpperCAmelCase_ = LayoutLMvaModel(config=__A )
model.to(__A )
model.eval()
# text + image
UpperCAmelCase_ = model(__A , pixel_values=__A )
UpperCAmelCase_ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A )
UpperCAmelCase_ = model(__A , bbox=__A , pixel_values=__A , token_type_ids=__A )
UpperCAmelCase_ = model(__A , bbox=__A , pixel_values=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase_ = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase_ = model(pixel_values=__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowercase (self : List[Any] , __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : int , __a : List[str] , __a : Any , __a : str , __a : Dict ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = LayoutLMvaForSequenceClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase_ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase (self : List[Any] , __a : int , __a : List[Any] , __a : Optional[int] , __a : Dict , __a : Optional[Any] , __a : Union[str, Any] , __a : List[str] , __a : List[Any] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = LayoutLMvaForTokenClassification(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase_ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowercase (self : Union[str, Any] , __a : Tuple , __a : Union[str, Any] , __a : Tuple , __a : Optional[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Optional[int] , __a : int ):
UpperCAmelCase_ = LayoutLMvaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase_ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase (self : Optional[int] ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a__ : Dict = False
a__ : List[str] = False
a__ : Tuple = False
a__ : Tuple = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ : str = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _lowercase (self : Any , __a : str , __a : Dict , __a : Optional[int] , __a : List[str] , __a : Union[str, Any] ):
return True
def _lowercase (self : int ):
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def _lowercase (self : str , __a : Any , __a : Union[str, Any] , __a : List[str]=False ):
UpperCAmelCase_ = copy.deepcopy(__A )
if model_class in get_values(__A ):
UpperCAmelCase_ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__A , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
UpperCAmelCase_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in get_values(__A ):
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
UpperCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__A , )
return inputs_dict
def _lowercase (self : str ):
self.config_tester.run_common_tests()
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*__A )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@slow
def _lowercase (self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = LayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCAmelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest( unittest.TestCase ):
@cached_property
def _lowercase (self : str ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def _lowercase (self : str ):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 715
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor( ProcessorMixin ):
    feature_extractor_class = """Wav2Vec2FeatureExtractor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__(self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__(self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("""input_features""" , None )
        labels = kwargs.pop("""labels""" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor(self ):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call).""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
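# Migration sketch for the deprecated `as_target_processor` above (hedged; the
# variable names are illustrative):
#
#     batch = processor(audio=raw_speech, sampling_rate=16_000, text=transcript)
#     # batch["input_values"] holds the audio features and batch["labels"] the tokenized text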
| 415
| 0
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*args):
    """Solves the interleaved print problem in multi-process output."""
    with open(__file__ , 'r') as fh:
        fcntl.flock(fh , fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN)
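# printflock serializes output from concurrently running ranks by taking an
# exclusive flock on this script file, keeping multi-process prints line-atomic.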
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(f'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(f'{gpu} is broken')
raise
| 23
|
import numpy as np
def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector):
    return vector * sigmoid(vector)
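# Quick sanity check (illustrative values): sigmoid(np.array([0.0])) -> array([0.5]),
# and sigmoid_linear_unit(np.array([0.0])) -> array([0.]), since x * sigmoid(x) is 0 at x = 0.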
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23
| 1
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
snake_case__ : Dict = logging.get_logger(__name__)
class PerceiverFeatureExtractor( PerceiverImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PerceiverImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 702
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[int] = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
snake_case__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
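# Note: with this _LazyModule setup, `import transformers.models.mctct` stays cheap;
# heavy submodules such as modeling_mctct are only imported on first attribute access.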
| 592
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **_a ):
        config = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_a )
return config
def __magic_name__ ( self ):
for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __magic_name__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def __magic_name__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
def __magic_name__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
def __magic_name__ ( self ):
for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
def __magic_name__ ( self ):
        self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def __magic_name__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def __magic_name__ ( self ):
for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
def __magic_name__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
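        # (explanatory note, not part of the original test: these reference values follow
        # the DDPM posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
        # under the linear beta schedule configured above)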
def __magic_name__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def __magic_name__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def __magic_name__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
def __magic_name__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )
def __magic_name__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def __magic_name__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 361
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __magic_name__ ( self ):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 361
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
_lowercase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
_lowercase = TaTokenizerFast
_lowercase = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
_lowercase = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
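# Note: MT5 has no tokenizer of its own; the `extra_objects` mapping above re-exports
# the T5 tokenizer classes under the MT5Tokenizer / MT5TokenizerFast names.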
| 713
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_lowercase = 'bert-base-cased'
_lowercase = 'fp16'
_lowercase = 'bf16'
_lowercase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration( AccelerateTestCase ):
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
        self.dist_env = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__a ):
lowerCamelCase__: Union[str, Any] = self.dist_env.copy()
lowerCamelCase__: int = f"""{i + 1}"""
lowerCamelCase__: Any = strategy
with mockenv_context(**__a ):
lowerCamelCase__: Optional[int] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__a ):
lowerCamelCase__: List[str] = self.dist_env.copy()
lowerCamelCase__: List[str] = prefetch_policy
with mockenv_context(**__a ):
lowerCamelCase__: Dict = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__a ):
lowerCamelCase__: str = self.dist_env.copy()
lowerCamelCase__: Tuple = state_dict_type
with mockenv_context(**__a ):
lowerCamelCase__: int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__: str = AutoModel.from_pretrained(__a )
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCamelCase__: str = self.dist_env.copy()
lowerCamelCase__: Union[str, Any] = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCamelCase__: Union[str, Any] = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
lowerCamelCase__: Optional[Any] = """2000"""
with mockenv_context(**__a ):
lowerCamelCase__: Union[str, Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowerCamelCase__: Optional[Any] = self.dist_env.copy()
lowerCamelCase__: str = """TRANSFORMER_BASED_WRAP"""
lowerCamelCase__: str = """T5Layer"""
with mockenv_context(**__a ):
lowerCamelCase__: Dict = FullyShardedDataParallelPlugin()
with self.assertRaises(__a ) as cm:
fsdp_plugin.set_auto_wrap_policy(__a )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
lowerCamelCase__: Union[str, Any] = self.dist_env.copy()
lowerCamelCase__: int = """SIZE_BASED_WRAP"""
lowerCamelCase__: str = """0"""
with mockenv_context(**__a ):
lowerCamelCase__: Any = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCamelCase__: Dict = self.dist_env.copy()
lowerCamelCase__: Union[str, Any] = mp_dtype
with mockenv_context(**__a ):
lowerCamelCase__: List[str] = Accelerator()
if mp_dtype == "fp16":
lowerCamelCase__: Any = torch.floataa
elif mp_dtype == "bf16":
lowerCamelCase__: Dict = torch.bfloataa
lowerCamelCase__: str = MixedPrecision(param_dtype=__a , reduce_dtype=__a , buffer_dtype=__a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__a )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCamelCase__: List[Any] = self.dist_env.copy()
lowerCamelCase__: int = str(__a ).lower()
with mockenv_context(**__a ):
lowerCamelCase__: int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__a ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest( TempDirTestCase ):
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            """fsdp_shard_grad_op_transformer_based_wrap""",
            """fsdp_full_shard_transformer_based_wrap""",
        ]
        self.peak_memory_usage_upper_bound = {
            """multi_gpu_fp16""": 3200,
            """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
            """fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder , """test_performance.py""" )
        cmd = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config )
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
| 242
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main():
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(""".""", 1)[0]
        file_root = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cv2.imwrite(F'''{file_root}.jpg''', new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            # labels are written back out in YOLO format: class x_center y_center width height
            obj = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj)
        with open(F'''{file_root}.txt''', """w""") as outfile:
            outfile.write("""\n""".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, """*.txt""")):
        label_name = label_file.split(os.sep)[-1].rsplit(""".""", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, F'''{label_name}.jpg''')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""").split(""" """)
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0, ):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    # pixel coordinate where the four tiles meet
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char):
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
| 412
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : int=10 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : int=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=32 , UpperCamelCase__ : Dict=5 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Optional[int]=10 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Any=0.9 , UpperCamelCase__ : Dict=None , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = patch_size
lowercase_ = tubelet_size
lowercase_ = num_frames
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = mask_ratio
lowercase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase_ = int(mask_ratio * self.seq_length )
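        # Worked example with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 masked tokens.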
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = VideoMAEModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = VideoMAEForPreTraining(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase_ = torch.ones((self.num_masks,) )
lowercase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase_ = mask.expand(self.batch_size , -1 ).bool()
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
# model only returns predictions for masked patches
lowercase_ = mask.sum().item()
lowercase_ = 3 * self.tubelet_size * self.patch_size**2
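        # (each masked token is reconstructed as tubelet_size * patch_size**2 RGB pixels,
        # hence 3 * tubelet_size * patch_size**2 predicted values per token)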
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : str = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Dict = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=False ):
'''simple docstring'''
lowercase_ = copy.deepcopy(UpperCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase_ = torch.ones((self.model_tester.num_masks,) )
lowercase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase_ = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase_ = bool_masked_pos.to(UpperCamelCase__ )
if return_labels:
if model_class in [
*get_values(UpperCamelCase__ ),
]:
lowercase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
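# Note on the two loss checks above: with `norm_pix_loss=True` the pretraining target is
# per-patch-normalized pixels, with `norm_pix_loss=False` it is raw pixels, which is why
# the same video yields two different expected losses (0.5142 vs 0.6469).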
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
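# Minimal usage sketch (illustrative; assumes the "YituTech/conv-bert-base" checkpoint
# referenced in PRETRAINED_VOCAB_FILES_MAP above is reachable):
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   encoded = tokenizer("Hello world")
#   # encoded["input_ids"] starts with [CLS] and ends with [SEP], per build_inputs_with_special_tokens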
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
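    # Example of the layout above (illustrative): for width=2, height=2 the rows of
    # `coords` are (x, y) pixel pairs in row-major order: [[0, 0], [1, 0], [0, 1], [1, 1]].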
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
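# Minimal usage sketch (illustrative): build the 20-view orbit defined above and inspect
# the resulting ray bundle; the 64x64 resolution is an arbitrary choice.
#
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays  # shape (1, 20 * 64 * 64, 2, 3): an (origin, direction) pair per pixel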
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub: bool = False, **kwargs, ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ), voice_preset[key], allow_pickle=False, )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key], subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding="max_length", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
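# Minimal usage sketch (illustrative; the checkpoint and preset names are assumptions
# based on the Bark models published on the Hub):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` holds the tokenized text plus a "history_prompt" BatchFeature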
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibyte_chars(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
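    # Note on the expected ids above (illustrative): ByT5 ids are raw UTF-8 byte values
    # offset by 3 special tokens (pad=0, eos=1, unk=2), e.g. "U" (byte 85) -> 88,
    # " " (byte 32) -> 35, and the three UTF-8 bytes of "€" become [229, 133, 175].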
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of this test because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    config = SwinvaConfig()
    name_split = swinva_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # the fused qkv projection is split into separate query/key/value tensors;
            # the target key names below follow the upstream Swinv2 conversion script
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
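# Example invocation (illustrative; the script filename is an assumption, while the flags
# match the argparse definitions above):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256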
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                # past key/value shape: (batch, num_heads, past_seq_len, head_dim)
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
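# Minimal usage sketch (illustrative): dummy ONNX export inputs with past key values.
# Each `past_key_values` entry built above has shape
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads).
#
#   config = GPTJConfig()
#   onnx_config = GPTJOnnxConfig(config, use_past=True)
#   # dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8,
#   #                                           framework=TensorType.PYTORCH)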
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
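# Minimal usage sketch (illustrative): the defaults above correspond to a base
# "divided space-time" TimeSformer variant operating on 8-frame clips.
#
#   config = TimesformerConfig(num_frames=8, attention_type="divided_space_time")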
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
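# Typical invocations for the script above (illustrative; assumes it lives at
# utils/release.py in the repository, as in the transformers repo):
#
#   python utils/release.py                 # pre-release: bump the version, clean README links
#   python utils/release.py --patch         # pre-release for a patch version
#   python utils/release.py --post_release  # move the version back to a .dev0 dev version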
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
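# Worked example of the comparison trick above (illustrative): rather than evaluating the
# huge powers themselves, compare exponent * log10(base). For 2^11 vs 3^7:
#   11 * log10(2) ~= 3.311  <  7 * log10(3) ~= 3.340, so 3^7 (= 2187) beats 2^11 (= 2048).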
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
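# Example of the mapping above (illustrative): printable bytes map to themselves as
# characters, while bytes outside the kept ranges are shifted past 255; byte 32 (space)
# is such a byte, so bytes_to_unicode()[32] == chr(256 + 32) == "Ġ".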
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
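# Example for the helper above (illustrative):
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}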
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.encoder )
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(UpperCamelCase__ )
lowerCamelCase_ = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(UpperCamelCase__ ):
try:
lowerCamelCase_ = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(UpperCamelCase__ )
lowerCamelCase_ = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(UpperCamelCase__ )
lowerCamelCase_ = ''' '''.join(UpperCamelCase__ )
lowerCamelCase_ = word
return word
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , UpperCamelCase__ ):
lowerCamelCase_ = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = ''''''.join(UpperCamelCase__ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
lowerCamelCase_ = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase_ = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ''' ''' + text
return (text, kwargs)
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[str]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase__ )
lowerCamelCase_ = ''' '''.join(UpperCamelCase__ )
lowerCamelCase_ = self.encode(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
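# Added illustrative trace (not from the original module) of one BPE merge
# step on the symbol tuple for "hello": `get_pairs` reports the adjacent
# symbol pairs, and applying the hypothetical merge ("l", "l") collapses them.
_word = ("h", "e", "l", "l", "o")
_merged = ("h", "e", "ll", "o")  # result of applying the ("l", "l") merge
assert get_pairs(_word) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
assert get_pairs(_merged) == {("h", "e"), ("e", "ll"), ("ll", "o")}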
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
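# Added sketch with made-up ids (not real BARTpho vocabulary entries) of the
# sequence-pair layout built above: <s> A </s></s> B </s>, with token type
# ids that are all zeros whether or not a second sequence is present.
_cls_id, _sep_id = 0, 2  # hypothetical special-token ids
_ids_a, _ids_b = [10, 11], [20]
_pair = [_cls_id] + _ids_a + [_sep_id] + [_sep_id] + _ids_b + [_sep_id]
assert _pair == [0, 10, 11, 2, 2, 20, 2]
assert len(_pair) * [0] == [0, 0, 0, 0, 0, 0, 0]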
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome." )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
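# Added toy example (hypothetical shapes, independent of the Trainer run
# above) of the align_predictions post-processing: argmax over the logits,
# then drop positions whose label equals the CrossEntropyLoss ignore_index.
def _align_predictions_demo():
    logits = np.array([[[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]]])  # (batch=1, seq=3, num_labels=2)
    label_ids = np.array([[1, -100, 0]])  # the middle position is a padded sub-word token
    demo_label_map = {0: "O", 1: "B-PER"}
    preds = np.argmax(logits, axis=2)
    kept = [(demo_label_map[p], demo_label_map[l]) for p, l in zip(preds[0], label_ids[0]) if l != -100]
    assert kept == [("B-PER", "B-PER"), ("B-PER", "O")]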
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
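# Added usage sketch for the circular queue above: enqueue wraps around the
# fixed-size backing array and dequeue frees slots in FIFO order.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)
    assert len(queue) == 3 and queue.first() == 1
    assert queue.dequeue() == 1 and queue.dequeue() == 2
    queue.enqueue(4)  # reuses the slot freed at the front
    assert queue.dequeue() == 3 and queue.dequeue() == 4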
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
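# Added usage sketch (assuming this class is the transformers
# DeformableDetrConfig; the original class name was obfuscated): the custom
# to_dict() above also serializes the nested backbone config.
if __name__ == "__main__":
    demo_config = DeformableDetrConfig(num_queries=100, two_stage=False)
    serialized = demo_config.to_dict()
    assert serialized["model_type"] == "deformable_detr" and serialized["num_queries"] == 100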
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}" )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1", metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        }, )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
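# Added usage sketch (assuming the public transformers benchmark API; the
# argument values are illustrative): the `device` and `n_gpu` properties
# defined above resolve lazily through the cached `_setup_devices`.
if __name__ == "__main__":
    benchmark_args = PyTorchBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128] )
    print(benchmark_args.device, benchmark_args.n_gpu)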
def naive_cut_rod_recursive(n, prices):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n, prices):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n, prices, max_rev):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n, prices):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n, prices):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
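# Added alternative sketch (not part of the original module): the same
# top-down recurrence with functools.lru_cache standing in for the explicit
# memo table.
from functools import lru_cache


def cut_rod_cached(n, prices):
    prices = tuple(prices)

    @lru_cache(maxsize=None)
    def best(m):
        if m == 0:
            return 0
        return max(prices[i - 1] + best(m - i) for i in range(1, m + 1))

    return best(n)


assert cut_rod_cached(6, [6, 10, 12, 15, 20, 23]) == 36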
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
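# Added usage sketch (hedged: assumes the public "shi-labs/versatile-diffusion"
# checkpoint and the standard diffusers pipeline call signature):
#
#   from diffusers import VersatileDiffusionTextToImagePipeline
#   pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
#   image = pipe("an astronaut riding a horse").images[0]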
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
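# Added deterministic check (separate from the interactive entry point): the
# sort is in place, and `right` follows the half-open convention, so pass
# len(array).
def _quick_sort_demo():
    sample = [5, 3, 8, 1, 9, 2]
    quick_sort_random(sample, 0, len(sample))
    assert sample == [1, 2, 3, 5, 8, 9]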
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f"{solution() = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
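# Added usage sketch (assuming this config is exposed as the transformers
# GPTNeoXConfig; the original class name was obfuscated): a well-formed
# rope_scaling dict passes validation, an unknown type raises.
if __name__ == "__main__":
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # validates fine
    try:
        GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
    except ValueError as err:
        print(err)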
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
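# Added concrete check of the 0-1000 box normalization above (toy numbers,
# not OCR output): a box on a 1000x2000 image maps into the model's
# resolution-independent coordinate space.
assert normalize_box([100, 200, 300, 400], 1000, 2000) == [100, 100, 300, 200]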
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.", )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.", )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
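# Added minimal server counterpart (an illustrative assumption; the original
# file ships only the client): it listens on the same port and streams a
# file back to the first connection.
def demo_server(filename: str = "file_to_send") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as f:
        conn.sendfile(f)
    conn.close()
    server.close()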
from ..utils import DummyObject, requires_backends


# The original class name was obfuscated; `LMSDiscreteScheduler` is assumed
# here, as the object diffusers guards behind the torch+scipy backends.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
"""simple docstring"""
_lowerCAmelCase : int = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
_lowerCAmelCase : list[bool | None] = [None] * 1000_0000
_lowerCAmelCase : Any = True
_lowerCAmelCase : Any = False
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_lowerCamelCase : Union[str, Any] = chain(next_number(_lowerCamelCase ) )
_lowerCamelCase : Tuple = number_chain
while number < 10000000:
_lowerCamelCase : Optional[Any] = number_chain
number *= 10
return number_chain
def lowerCamelCase_( _lowerCamelCase = 10000000 ) -> int:
'''simple docstring'''
for i in range(1 , _lowerCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
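# Worked example (added for illustration), following the Problem 92 statement:
# 44 -> 32 -> 13 -> 10 -> 1, while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89.
if __name__ == "__main__":
    assert next_number(44) == 32
    assert next_number(85) == 89
    assert chain(44) is True  # arrives at 1
    assert chain(85) is False  # arrives at 89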
| 716
|
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 10 , _lowerCamelCase = 22 ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = range(1 , _lowerCamelCase )
_lowerCamelCase : Tuple = range(1 , _lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
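# Worked examples (added for illustration) from the Problem 63 statement:
# 16807 = 7**5 is a 5-digit fifth power and 134217728 = 8**9 is a 9-digit
# ninth power, so both are counted by solution().
if __name__ == "__main__":
    assert 7**5 == 16807 and len(str(7**5)) == 5
    assert 8**9 == 134217728 and len(str(8**9)) == 9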
| 386
| 0
|
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Creates the ngrams of size ``ngram_size`` from the given sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
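# Example usage (added for illustration): character-level bigrams of a string.
if __name__ == "__main__":
    print(create_ngram("I am a sentence", 2))  # ['I ', ' a', 'am', 'm ', ...]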
| 320
|
from __future__ import annotations

from typing import Any


class Matrix:
    """A simple dense matrix supporting +, -, *, transpose and the Sherman-Morrison update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            raise TypeError(f"Unsupported type given for another ({type(another)})")

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """
        Apply the Sherman-Morrison formula: given A^(-1) (this matrix) and column
        vectors u, v, compute (A + uv^T)^(-1), or None when the update is singular.
        """
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman-Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
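# Numerical sanity check (added for illustration): with A = I (so ainv = I),
# sherman_morrison returns (I + u v^T)^(-1); multiplying back by (I + u v^T)
# should reproduce the identity matrix up to floating-point error.
def _check_sherman_morrison() -> None:
    ainv = Matrix(3, 3, 0)
    for i in range(3):
        ainv[i, i] = 1
    u = Matrix(3, 1, 0)
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3, 1, 0)
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
    result = ainv.sherman_morrison(u, v)
    a_plus = (u * v.transpose()) + ainv  # A + u v^T with A = I
    product = a_plus * result
    for r in range(3):
        for c in range(3):
            expected = 1 if r == c else 0
            assert abs(product[r, c] - expected) < 1e-9


if __name__ == "__main__":
    _check_sherman_morrison()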
| 320
| 1
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
        self.calib_dataset = None  # set via calibrate() / get_calib_dataloader()

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
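# Usage sketch (added for illustration; the variable names below are
# assumptions, not part of the original file). A typical flow is: build the
# trainer, run post-training-quantization calibration, then export to ONNX:
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         quant_trainer_args=quant_trainer_args,
#     )
#     trainer.calibrate()            # calibrates on train_dataset by default
#     trainer.save_onnx("./onnx")    # writes ./onnx/model.onnx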
| 714
|
"""simple docstring"""
def _a ( _snake_case ): # noqa: E741
"""simple docstring"""
UpperCAmelCase = len(_snake_case )
UpperCAmelCase = 0
UpperCAmelCase = [0] * n
UpperCAmelCase = [False] * n
UpperCAmelCase = [False] * n
def dfs(_snake_case , _snake_case , _snake_case , _snake_case ):
if parent == root:
out_edge_count += 1
UpperCAmelCase = True
UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
UpperCAmelCase = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
UpperCAmelCase = True
else:
UpperCAmelCase = min(low[at] , _snake_case )
return out_edge_count
for i in range(_snake_case ):
if not visited[i]:
UpperCAmelCase = 0
UpperCAmelCase = dfs(_snake_case , _snake_case , -1 , _snake_case )
UpperCAmelCase = out_edge_count > 1
for x in range(len(_snake_case ) ):
if is_art[x] is True:
print(_snake_case )
# Adjacency list of graph
_UpperCamelCase = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
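# For the adjacency list above (note added for illustration): removing vertex 2
# separates {0, 1} from {3, 4, 5, 6, 7, 8}, removing 3 isolates 4, and removing
# 5 cuts off the 6-7-8 cycle, so compute_ap(data) prints 2, 3 and 5.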
| 74
| 0
|
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(UpperCamelCase ) , UpperCamelCase )
return number - int(UpperCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 77
|
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a SORTED array of integers, returns the indices of the two numbers
    that add up to ``target``, using the two-pointer technique.
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 481
| 0
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """Construct a BigBird tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
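# Usage sketch (added for illustration; downloads the pretrained SentencePiece
# vocabulary on first use):
#
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tokenizer("Paris is the capital of France.").input_ids
#     print(tokenizer.decode(ids))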
| 421
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename the keys of the base flax model."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 421
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
def __init__( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple=13 , UpperCAmelCase : Dict=7 , UpperCAmelCase : int=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Dict=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=99 , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : Dict=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : List[str]=37 , UpperCAmelCase : Tuple="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Optional[Any]=16 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=0.0_2 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : List[Any]=None , ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =parent
lowercase : Tuple =batch_size
lowercase : Dict =seq_length
lowercase : List[Any] =is_training
lowercase : int =use_input_mask
lowercase : List[Any] =use_token_type_ids
lowercase : List[Any] =use_labels
lowercase : Any =vocab_size
lowercase : List[Any] =hidden_size
lowercase : str =num_hidden_layers
lowercase : Dict =num_attention_heads
lowercase : Union[str, Any] =intermediate_size
lowercase : Union[str, Any] =hidden_act
lowercase : List[Any] =hidden_dropout_prob
lowercase : str =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Dict =type_vocab_size
lowercase : List[Any] =type_sequence_label_size
lowercase : List[str] =initializer_range
lowercase : Union[str, Any] =num_labels
lowercase : List[str] =num_choices
lowercase : Union[str, Any] =scope
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Dict =None
if self.use_input_mask:
lowercase : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : int =None
lowercase : int =None
if self.use_labels:
lowercase : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Any =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Tuple =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A__ ( self : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : int =DistilBertModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : List[str] =model(UpperCAmelCase , UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] =DistilBertForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =DistilBertForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Any =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
lowercase : Tuple =self.num_labels
lowercase : int =DistilBertForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Any =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.num_labels
lowercase : List[str] =DistilBertForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Optional[int] =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowercase : Tuple =self.num_choices
lowercase : Any =DistilBertForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Union[str, Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : str =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : str =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase_ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def A__ ( self : Tuple ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> str:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
@slow
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[str] =DistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@slow
@require_torch_gpu
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase , lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase : Optional[Any] =True
lowercase : Tuple =model_class(config=UpperCAmelCase )
lowercase : Tuple =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowercase : Tuple =torch.jit.trace(
UpperCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase , os.path.join(UpperCAmelCase , '''traced_model.pt''' ) )
lowercase : Any =torch.jit.load(os.path.join(UpperCAmelCase , '''traced_model.pt''' ) , map_location=UpperCAmelCase )
loaded(inputs_dict['''input_ids'''].to(UpperCAmelCase ) , inputs_dict['''attention_mask'''].to(UpperCAmelCase ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
@slow
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : List[str] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : List[Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Any =model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
lowercase : Union[str, Any] =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Union[str, Any] =torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1e-4 ) )
| 94
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
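# Worked example (added for illustration): for prediction "the cat sat" and
# reference "cat sat down", normalization drops the article "the", leaving
# tokens {cat, sat} vs {cat, sat, down}; precision = 2/2, recall = 2/3, so
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.
if __name__ == "__main__":
    assert abs(f1_score("the cat sat", "cat sat down") - 0.8) < 1e-9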
| 259
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
def __init__( self : Optional[int] , a : Optional[int] , a : List[Any]=13 , a : Optional[int]=7 , a : List[str]=True , a : int=True , a : List[Any]=True , a : Optional[int]=True , a : List[str]=99 , a : Dict=24 , a : List[str]=2 , a : Optional[Any]=6 , a : int=37 , a : Optional[Any]="gelu" , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : Any=5_12 , a : str=16 , a : List[str]=2 , a : Optional[int]=0.02 , a : Tuple=3 , a : Union[str, Any]=None , a : Any=10_00 , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Any = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : int = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = scope
SCREAMING_SNAKE_CASE__ : Any = range_bbox
def A_ ( self : Tuple ) ->int:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 1]
SCREAMING_SNAKE_CASE__ : Dict = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE__ : List[str] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox[i, j, 0]
SCREAMING_SNAKE_CASE__ : List[Any] = t
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A_ ( self : int ) ->Optional[int]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A_ ( self : Any , a : Union[str, Any] , a : Any , a : Dict , a : int , a : Union[str, Any] , a : Optional[Any] , a : Optional[int] , ) ->str:
SCREAMING_SNAKE_CASE__ : Dict = LiltModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Any = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : int = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : int , a : str , a : Any , a : Tuple , a : Union[str, Any] , a : Optional[int] , a : str , a : List[Any] , ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LiltForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Any , a : List[str] , a : Dict , a : str , a : List[str] , a : int , a : List[Any] , a : List[str] , ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = LiltForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
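# Note added for clarity (an assumption, not part of the original test): LiLT
# pairs every token with a 4-value bounding box (x0, y0, x1, y1); as in the
# LayoutLM family, coordinates are expected on a 0-1000 normalized page grid,
# which is why the toy boxes above are plain small integers.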
| 702
|
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""
    def __init__(self) -> None:
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree using Prim's algorithm."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
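    # Worked example (a sketch added for illustration, not part of the script):
    # for the 3 input edges "0 1 1", "1 2 2", "0 2 3" the adjacency list becomes
    #   {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
    # and prisms_algorithm returns [(0, 1), (1, 2)]: the two cheapest edges that
    # connect all three vertices, as (tree_vertex, new_vertex) pairs.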
| 26
| 0
|
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """simple docstring"""
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)
    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    """simple docstring"""
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    """simple docstring"""
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype('f4')
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss
    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    """simple docstring"""
    # field names follow the BigBird natural-questions research script this file is based on
    model_id: str = 'google/bigbird-roberta-base'
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = 'bigbird-roberta-natural-questions'
    base_dir: str = 'training-expt'
    tr_data_path: str = 'data/nq-training.jsonl'
    val_data_path: str = 'data/nq-validation.jsonl'
    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    """simple docstring"""
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch
    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features['input_ids'])
        batch = {
            'input_ids': jnp.array(input_ids, dtype=jnp.int32),
            'attention_mask': jnp.array(attention_mask, dtype=jnp.int32),
            'start_labels': jnp.array(features['start_token'], dtype=jnp.int32),
            'end_labels': jnp.array(features['end_token'], dtype=jnp.int32),
            'pooled_labels': jnp.array(features['category'], dtype=jnp.int32),
        }
        return batch
    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)
    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
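# Worked padding example (illustrative values, not from the original script): a
# collator built as DataCollator(pad_id=0, max_length=6) turns the sequence
# [5, 6, 7] into input_ids [5, 6, 7, 0, 0, 0] with attention_mask
# [1, 1, 1, 0, 0, 0] via _fetch_inputs.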
def get_batched_dataset(dataset, batch_size, seed=None):
    """simple docstring"""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name='batch')
def train_step(state, drp_rng, **model_inputs):
    """simple docstring"""
    def loss_fn(params):
        start_labels = model_inputs.pop('start_labels')
        end_labels = model_inputs.pop('end_labels')
        pooled_labels = model_inputs.pop('pooled_labels')
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )
    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    grads = jax.lax.pmean(grads, 'batch')
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name='batch')
def val_step(state, **model_inputs):
    """simple docstring"""
    start_labels = model_inputs.pop('start_labels')
    end_labels = model_inputs.pop('end_labels')
    pooled_labels = model_inputs.pop('pooled_labels')
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    return metrics
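# Note (added for clarity): both pmapped steps expect every array in
# `model_inputs` to carry a leading device axis; the DataCollator above produces
# that layout by mapping `shard` over the batch before it is fed in.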
class TrainState(train_state.TrainState):
    """simple docstring"""
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    """simple docstring"""
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                'lr': args.lr,
                'init_lr': args.init_lr,
                'warmup_steps': args.warmup_steps,
                'num_train_steps': num_train_steps,
                'weight_decay': args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=F"""Running EPOCH-{epoch}"""):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics['loss'])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        'step': state_step.item(),
                        'eval_loss': eval_loss.item(),
                        'tr_loss': tr_loss,
                        'lr': lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc='Evaluating ... '):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics['loss'])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(F"""SAVING CHECKPOINT IN {save_dir}""", end=' ... ')
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, 'opt_state.msgpack'), 'wb') as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, 'args.joblib'))
        joblib.dump(self.data_collator, os.path.join(save_dir, 'data_collator.joblib'))
        with open(os.path.join(save_dir, 'training_state.json'), 'w') as f:
            json.dump({'step': state.step.item()}, f)
        print('DONE')
def restore_checkpoint(save_dir, state):
    """simple docstring"""
    print(f"""RESTORING CHECKPOINT FROM {save_dir}""", end=' ... ')
    with open(os.path.join(save_dir, 'flax_model.msgpack'), 'rb') as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, 'opt_state.msgpack'), 'rb') as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, 'args.joblib'))
    data_collator = joblib.load(os.path.join(save_dir, 'data_collator.joblib'))
    with open(os.path.join(save_dir, 'training_state.json'), 'r') as f:
        training_state = json.load(f)
    step = training_state['step']
    print('DONE')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """simple docstring"""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
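# Shape of the resulting schedule (illustrative numbers, not from the original
# file): with init_lr=0.0, lr=1.0, warmup_steps=10, num_train_steps=20 the
# learning rate climbs linearly 0.0 -> 1.0 over steps 0-10, then decays
# linearly 1.0 -> 1e-7 over the remaining 10 steps.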
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """simple docstring"""
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # the mask is computed over the flattened parameter *paths*, not the values
        mask = {k: (k[-1] != 'bias' and k[-2:] != ('LayerNorm', 'scale')) for k in params}
        return traverse_util.unflatten_dict(mask)
    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 229
|
from torch import nn
def get_activation(act_fn: str):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
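# Minimal usage sketch (not part of the original module): the helper lets a
# config string pick the non-linearity, e.g.
#   act = get_activation("gelu")      # -> nn.GELU()
#   y = act(torch.randn(2, 3))        # requires `import torch`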
| 6
| 0
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1: tuple[float, float], vertex2: tuple[float, float], vertex3: tuple[float, float], depth: int, ) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
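# Worked midpoint example (added for illustration): for the triangle defined
# below, get_mid((-175, -125), (0, 175)) == (-87.5, 25.0).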
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 711
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 338
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """yolos"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs(self):
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation(self):
        '''simple docstring'''
        return 1e-4
    @property
    def default_onnx_opset(self):
        '''simple docstring'''
        return 12
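# Hedged usage sketch (an assumption, not from the original file): these
# properties feed the legacy `transformers.onnx` exporter, roughly:
#   onnx_config = YolosOnnxConfig(config)
#   # export with an opset >= onnx_config.default_onnx_opset (12) and validate
#   # ONNX vs. PyTorch outputs with atol = onnx_config.atol_for_validation (1e-4).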
| 100
|
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    '''simple docstring'''
    ALL_CHECKS = """all_checks"""
    BASIC_CHECKS = """basic_checks"""
    NO_CHECKS = """no_checks"""
class ChecksumVerificationException(Exception):
    '''simple docstring'''
class UnexpectedDownloadedFile(ChecksumVerificationException):
    '''simple docstring'''
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    '''simple docstring'''
class NonMatchingChecksumError(ChecksumVerificationException):
    '''simple docstring'''
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    '''simple docstring'''
class UnexpectedSplits(SplitsVerificationException):
    '''simple docstring'''
class ExpectedMoreSplits(SplitsVerificationException):
    '''simple docstring'''
class NonMatchingSplitsSizesError(SplitsVerificationException):
    '''simple docstring'''
def verify_splits(expected_splits, recorded_splits) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
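# Worked example (illustrative, not in the original module): with
#   expected = {"u": {"num_bytes": 1, "checksum": "a"}}
#   recorded = {"u": {"num_bytes": 1, "checksum": "b"}}
# verify_checksums(expected, recorded) raises NonMatchingChecksumError because
# the per-URL dicts differ; identical dicts log a success message instead.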
| 118
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    _lowerCamelCase = """vivit"""
    model_type = """vivit"""
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 711
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None
    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, '''/'''),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp('''test_hf_gcp''') / '''test_wikipedia_simple'''
    dataset_module = dataset_module_factory('''wikipedia''', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name='''20220301.frr''', hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory('''wikipedia''', cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name='''20220301.frr''', hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['''train'''], IterableDataset)
    assert next(iter(ds['''train''']))
| 190
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.
PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print('''Welcome!''')
    yield
    print('''Bye!''')
@contextlib.contextmanager
def context_fr():
    print('''Bonjour!''')
    yield
    print('''Au revoir!''')
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('''transformers''') is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('''Transformers are awesome!''')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), '''Transformers are awesome!\n''')
    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), '''Welcome!\nTransformers are awesome!\nBye!\n''')
    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''')
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['''labels'''])
        self.assertEqual(find_labels(BertForPreTraining), ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['''start_positions''', '''end_positions'''])
        class DummyModel(BertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ['''labels'''])
    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['''labels'''])
        self.assertEqual(find_labels(TFBertForPreTraining), ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['''start_positions''', '''end_positions'''])
        class DummyModel(TFBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ['''labels'''])
    @require_flax
    def test_find_labels_flax(self):
        # Flax models do not declare label arguments
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])
        class DummyModel(FlaxBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), [])
| 416
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None
    def __repr__(self) -> str:
        return F'Node({self.data})'
class LinkedList:
    def __init__(self):
        self.head = None
    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('''list index out of range.''')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('''list index out of range.''')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('''list index out of range''')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        print(self)
    def delete_head(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('''List index out of range.''')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        return self.head is None
    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
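    # Worked trace (added for illustration): reversing 1->2->3 rewires one link
    # per iteration: after visiting 1 the list is 1->None, after 2 it is
    # 2->1->None, after 3 it is 3->2->1->None, and head becomes the old tail.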
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        '''dlrow olleH''',
        7,
        5555,
        0,
        -192.55555,
        '''Hello, world!''',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('''Hello again, world!'''))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('''Inserting 1st at head ''').strip())
    linked_list.insert_head(input('''Inserting 2nd at head ''').strip())
    print('''\nPrint list:''')
    linked_list.print_list()
    linked_list.insert_tail(input('''\nInserting 1st at tail ''').strip())
    linked_list.insert_tail(input('''Inserting 2nd at tail ''').strip())
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nDelete head''')
    linked_list.delete_head()
    print('''Delete tail''')
    linked_list.delete_tail()
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nReverse linked list''')
    linked_list.reverse()
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nString representation of linked list:''')
    print(linked_list)
    print('''\nReading/changing Node data using indexing:''')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('''Enter New Value: ''').strip()
    print('''New list:''')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')
if __name__ == "__main__":
main()
| 416
| 1
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """simple docstring"""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *,
        duplication_jaccard_threshold: float = 0.85, ) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """simple docstring"""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
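# Worked example (added for illustration): get_tokens("a b c") == {"a", "b", "c"}
# and get_tokens("b c d") == {"b", "c", "d"}; the intersection has 2 tokens and
# the union 4, so jaccard_similarity("a b c", "b c d") == 0.5.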
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """simple docstring"""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F"""Original dataset size: {len(dataset)}""")
    print(F"""Number of duplicate clusters: {len(duplicate_clusters)}""")
    print(F"""Files in duplicate cluster: {len(duplicate_indices)}""")
    print(F"""Unique files in duplicate cluster: {len(extreme_dict)}""")
    print(F"""Filtered dataset size: {len(ds_filter)}""")
    return ds_filter, duplicate_clusters
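# Minimal usage sketch (illustrative, not from the original file): given a
# `datasets.Dataset` with "content", "repo_name" and "path" columns,
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
# keeps one "extreme" representative per near-duplicate cluster and returns the
# cluster bookkeeping alongside the filtered dataset.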
| 707
|
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """simple docstring"""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
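    # Worked example (a sketch, not in the original file): a 2x2 grid of open
    # cells has exactly two simple paths from (0, 0) to (1, 1):
    #   depth_first_search([[0, 0], [0, 0]], 0, 0, set())  ->  2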
| 279
| 0
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
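# Standalone sanity check (illustrative sketch; assumes nltk is installed —
# `_compute` above is a thin wrapper around nltk's corpus_gleu):
if __name__ == "__main__":
    from nltk.translate import gleu_score as _gleu_score

    print(
        _gleu_score.corpus_gleu(
            list_of_references=[[["the", "cat", "sat"]]],
            hypotheses=[["the", "cat", "sat"]],
            min_len=1,
            max_len=4,
        )
    )  # 1.0 for an exact match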
| 71
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 403
| 0
|
"""simple docstring"""
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
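    # Worked example (illustrative, not part of the original script): a 25,000 loan
    # at 8% p.a. over 10 years -> monthly rate 0.08 / 12, 120 monthly payments.
    print(round(equated_monthly_installments(25_000, 0.08, 10), 2))  # ~303.32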
| 721
|
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # no result checks for now; a successful subprocess run is the assertion
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 2
| 0
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER) but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
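# Illustrative self-check (a sketch; assumes a jiwer version exposing compute_measures):
# the per-pair loop in _compute reduces to this arithmetic on a single pair.
if __name__ == "__main__":
    _m = jiwer.compute_measures(
        "this is the reference",
        "this is the prediction",
        truth_transform=cer_transform,
        hypothesis_transform=cer_transform,
    )
    _incorrect = _m["substitutions"] + _m["deletions"] + _m["insertions"]
    _total = _m["substitutions"] + _m["deletions"] + _m["hits"]
    print(_incorrect / _total)  # character error rate for the single pair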
| 677
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16_000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: bool = True, normalize_vars: bool = True, padding_value: float = 0.0):
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
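# Usage sketch (illustrative): featurising one second of random 16 kHz audio.
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    speech = np.random.randn(16_000).astype(np.float32)
    inputs = extractor(speech, sampling_rate=16_000, padding=True, return_tensors="np")
    print(inputs["input_features"].shape)  # (1, num_frames, 80) log-mel filterbank features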
| 512
| 0
|
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
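# Usage sketch (illustrative; assumes the sibling hash_table module exposes an
# insert_data API — treat the call below as a hypothetical example):
# each slot chains values in a deque, newest first, and _collision_resolution only
# probes further once a slot already holds `charge_factor` items.
# table = HashTableWithLinkedList(3, charge_factor=2)
# for value in (10, 13, 16):
#     table.insert_data(value)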
| 67
|
"""simple docstring"""
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_config = config.model.params.first_stage_config.params
    unet_config = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_config).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_config).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
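# Example invocation (illustrative; the script and file names are placeholders):
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline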
| 67
| 1
|
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
| 416
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 416
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
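# Import behaviour note (illustrative): with the lazy module in place, reading the
# config/processing attributes is cheap, while the first access to ClapModel is what
# actually triggers the heavy modeling_clap/torch import.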
| 710
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 208
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 568
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
"""simple docstring"""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 657
| 0
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
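# Example invocation (illustrative; paths are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet/model.ckpt \
#       --xlnet_config_file ./xlnet/config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b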
| 718
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 346
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus --tpu_num_cores.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
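# Example invocation (hypothetical script name and flags); everything after the
# training script is forwarded to it verbatim:
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased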
| 346
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
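    # Example (hypothetical paths): convert only a pre-processed corpus pickle,
    # leaving the TF checkpoint flags empty:
    #   python convert_transfo_xl_checkpoint.py --pytorch_dump_folder_path ./dump \
    #       --transfo_xl_dataset_file ./corpus-info.pkl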
| 346
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
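# A minimal usage sketch (not part of the original module): the segmentation
# attributes only matter when the config drives a semantic-segmentation head.
#   config = BeitConfig(out_indices=[3, 5, 7, 11], use_auxiliary_head=True)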
| 705
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__a : Tuple = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a,  # the expected-encoding dict built above (kept verbatim from the source)
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
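# Note: `forced_bos_token_id` above is how M2M-100 steers generation toward the
# target language: decoding is forced to start with the target-language code token.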
| 577
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array([0.00039216] * 9)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
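# The (20, H, W, 3) output shape is the rendered turntable: the pipeline decodes one
# latent into 20 RGB frames of the 3D asset, one per camera angle.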
| 435
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
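# Note: with image_size=30, patch_size=2 and mask_ratio=0.6 the tester expects
# ceil(0.4 * (225 + 1)) = 91 visible tokens, which is the seq_length checked above.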
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 111
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
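# Note: the timm -> transformers weight copy above relies on both state dicts
# enumerating tensors in the same order; it maps by position, not by key name.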
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
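    # Example (hypothetical): convert a single variant without pushing to the Hub:
    #   python convert_levit.py --model_name levit-128S --pytorch_dump_folder_path ./levit-dump --no-push_to_hub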
| 20
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestDocSamples(unittest.TestCase):
    def analyze_directory(self, directory, identifier=None, n_identifier=None, ignore_files=None, only_modules=True):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_files(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_files(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 20
| 1
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(output_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(output_dir, p) for p in os.listdir(output_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 86
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # internal nodes first, leaves after
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
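# update and query both run in O(log N): update climbs one leaf-to-root path,
# query halves the [l, r] window once per level of the implicit binary tree.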
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 64
| 0
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually returns corresponding tokens ids: - **text_input**: String to
        tokenize - **return_ids**: Boolean flag indicating if the tokens have to be converted to their integer
        mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text: - **tokens_ids**: List of tokens ids -
        **skip_special_tokens**: Flag indicating to not try to decode special tokens -
        **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        **inputs**: Inputs to run the underlying pipeline on.
        """
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
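# A quick way to exercise the routes registered above once the server is running
# (route names and Body(..., embed=True) payload keys come from the definitions
# above; the port depends on --port, and `requests` is a third-party HTTP client):
#
#   import requests
#
#   base = "http://localhost:8888"
#   print(requests.get(f"{base}/").json())  # model config via ServeModelInfoResult
#   print(requests.post(f"{base}/tokenize", json={"text_input": "Hello world", "return_ids": True}).json())
#   print(requests.post(f"{base}/forward", json={"inputs": "Hello world"}).json())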
| 715
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Implements the sigmoid function.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """
    Implements the GELU activation via the 1.702 sigmoid approximation.
    """
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
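# A short sanity check of the two activations. sigmoid(0) is exactly 0.5 and
# GELU(0) is exactly 0.0; the other printed values are approximate:
#
#   x = np.array([-1.0, 0.0, 1.0])
#   print(sigmoid(x))                     # [0.26894142 0.5        0.73105858]
#   print(gaussian_error_linear_unit(x))  # roughly [-0.154, 0.0, 0.846]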
| 562
| 0
|
def solution(limit: int = 1_000_000) -> int:
    """
    Counts the values of n below `limit` for which x**2 - y**2 - z**2 == n has
    exactly ten solutions with x, y, z in arithmetic progression.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
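# The solver leans on a small algebraic identity (this derivation is a reading of
# the code above, not text from the source file): with x = a + d, y = a, z = a - d
# in arithmetic progression,
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a)
# so n = a * (4*d - a), and d = (n/a + a) / 4 for every divisor a of n — exactly
# what the inner loop computes. Spot check:
a_check, d_check = 6, 3
assert (a_check + d_check) ** 2 - a_check**2 - (a_check - d_check) ** 2 == a_check * (4 * d_check - a_check)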
| 37
|
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Improved Pseudo numerical methods for diffusion models (iPNDM), a fourth-order multistep scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1_000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        self.timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = self.timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth style) combination of past predictions
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
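# A minimal sampling loop against the scheduler above. The tensor shape and the
# zero "model" are made up for illustration; a real pipeline supplies a denoising
# network in place of `torch.zeros_like`:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=50)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)  # stand-in for the denoiser's prediction
#       sample = scheduler.step(model_output, t, sample).prev_sample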
| 139
| 0
|
def join(separator: str, separated: list) -> str:
    """
    Joins a list of strings using a separator, mirroring str.join.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 203
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Sets a given tensor (parameter or buffer) of a module on a specific device; `param.to(device)` is not supported
    by `bitsandbytes` quantized params, hence this helper.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
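# These helpers are normally reached through `from_pretrained`. A hedged
# end-to-end sketch (model id arbitrary; needs a CUDA machine with
# `bitsandbytes` installed):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quant_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("gpt2", quantization_config=quant_config, device_map="auto")
#   print(model.get_memory_footprint())  # int8 linears shrink the footprint roughly 2x vs fp16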
| 203
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
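# A quick illustration of the two helpers above (the printed pair order may vary
# since `get_pairs` returns a set):
#
#   table = bytes_to_unicode()
#   print(len(table))                  # 256: one printable stand-in per byte value
#   print(get_pairs(("l", "o", "w")))  # {('l', 'o'), ('o', 'w')}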
class LEDTokenizer(PreTrainedTokenizer):
    """
    Constructs a LED tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
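# To see the `_pad` override in action, a small sketch (checkpoint name taken from
# the pretrained map above; running it downloads the vocab files):
#
#   tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tok("long document")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global attention on <s>
#   padded = tok.pad(enc, padding="max_length", max_length=16)
#   print(padded["global_attention_mask"])  # right-padded with -1, matching the branch above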
| 174
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    """
    Configuration class for a timm-based backbone wrapper.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
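# Minimal usage sketch of the config above (backbone id arbitrary):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   print(config.backbone, config.num_channels, config.out_indices)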
| 174
| 1
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that ensures accelerate's offload hook (if any) runs before the wrapped method.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
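# Hypothetical usage sketch: the decorator is a transparent pass-through unless
# accelerate >= 0.17.0 has attached an offload hook to the instance. The class
# and method names below are invented for illustration:
#
#   class TinyAutoencoder:
#       @apply_forward_hook
#       def encode(self, x):
#           return x * 2
#
#   print(TinyAutoencoder().encode(21))  # 42; with an `_hf_hook`, pre_forward would run first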
| 147
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """
    Double Linked List Node built specifically for LRU Cache
    """

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class DoubleLinkedList(Generic[T, U]):
    """
    Double Linked List built specifically for LRU Cache
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """
        Adds the given node to the end of the list (before rear)
        """
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """
        Removes and returns the given node from the list
        """
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """
    LRU Cache to store a given capacity of data
    """

    # class variable to map the decorated functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """
        Returns the value for the input key and updates the Double Linked List
        """
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """
        Sets the value for the input key and updates the Double Linked List
        """
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """
        Decorator version of LRU Cache
        """

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
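# Example of the decorator interface defined above:
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)
#
#   print(fib(20))           # 6765
#   print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)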
| 147
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any `GenerationConfig` value with a plain, JSON-serializable dict.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 693
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 90
| 0
|
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Gets a set of train, valid, and test dataloaders for a particular fold
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
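# The stratification step in isolation, with toy labels made up for illustration:
# each fold preserves the class ratio of the full training set.
#
#   import numpy as np
#   from sklearn.model_selection import StratifiedKFold
#
#   labels = np.array([0, 0, 0, 1, 1, 1])
#   for train_idx, valid_idx in StratifiedKFold(n_splits=3).split(np.zeros(len(labels)), labels):
#       print(train_idx, valid_idx)  # every validation fold holds one sample of each class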
| 710
|
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint to the HuggingFace format and verify its outputs."""
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 272
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 571
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 571
| 1
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        tokens_with_unk = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(tokens_with_unk)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagoftoken(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization: <|bagoftoken|> expands to repeated tokens
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(ids)
        self.assertEqual(output_text, expected_text)
@slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_1 = tokenizer.encode(prefix_text + input_text)
        ids_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        ids_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        decoded_1 = tokenizer.decode(ids_1)
        decoded_2 = tokenizer.decode(ids_2)
        decoded_3 = tokenizer.decode(ids_3)
        self.assertEqual(decoded_1, expected_text)
        self.assertEqual(decoded_2, expected_text)
        self.assertEqual(decoded_3, expected_text)
@slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
self.assertListEqual(lowercase__ , lowercase__)
@slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
@slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_input_ids = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_input_ids)
        self.assertListEqual(x_token.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token.attention_mask, expected_attention_mask)
        self.assertListEqual(x_token_2.input_ids, expected_input_ids)
        self.assertListEqual(x_token_2.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token_2.attention_mask, expected_attention_mask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 675
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
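# Worked example: with the default scale_factor=8, a requested height of 768
# gives new_height = 768 // 64 = 12 (no remainder, so no rounding up), and the
# function returns 12 * 8 = 96. In other words, sizes are rounded up to a
# multiple of scale_factor**2 and then divided by scale_factor.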
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Pipeline for image generation using Kandinsky 2.2 with ControlNet conditioning."""

    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
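# Classifier-free guidance recap for the denoising loop above: the batch is
# doubled into (unconditional, conditional) halves, and the guided prediction is
#     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# With guidance_scale <= 1.0 the doubling is skipped entirely.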
| 675
| 1
|
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector = vector . vector"""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier"""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel was used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * norm_squared(vector1 - vector2))."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
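# Minimal usage sketch (made-up, linearly separable toy data; the expected
# prediction of 1 is an assumption based on the geometry, not a verified run):
#
#     xs = [np.array([1.0, 1.0]), np.array([2.0, 2.0]),
#           np.array([-1.0, -1.0]), np.array([-2.0, -2.0])]
#     ys = np.array([1, 1, -1, -1])
#     svc = SVC(kernel="linear")
#     svc.fit(xs, ys)
#     svc.predict(np.array([1.5, 1.5]))  # expected: 1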
if __name__ == "__main__":
import doctest
doctest.testmod()
| 444
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 485
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size=16):
    """Build tiny MRPC train/eval dataloaders for testing."""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 281
|
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor recursively (Euclid's algorithm)."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the greatest common divisor iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
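# Worked trace of the iterative version:
#     gcd_by_iterative(24, 36): (24, 36) -> (36, 24) -> (24, 12) -> (12, 0) -> returns 12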
def main() -> None:
    """Call the GCD functions on two comma-separated integers read from stdin."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 281
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 418
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
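# Usage sketch for the fire-based CLI below (the script name is hypothetical):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# Omitting --save_path converts the checkpoint in place.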
if __name__ == "__main__":
fire.Fire(convert)
| 418
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 700
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder"
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """RegNet shortcut, used to project the residual features to the correct size; can also downsample with stride=2."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
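# Squeeze-and-excitation recap for the layer above: global-average-pool to
# (batch, 1, 1, C), squeeze through a ReLU 1x1 convolution, expand back through
# a sigmoid-gated 1x1 convolution, then rescale the input feature map
# channel-wise. This is a cheap form of channel attention.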
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's layer composed of three 3x3 convolutions, same as a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    '''simple docstring'''

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        """simple docstring"""
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
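# A minimal, hedged usage sketch for the two classes above. It assumes the usual
# transformers companions (AutoImageProcessor, a "facebook/regnet-y-040"
# checkpoint, and a PIL `image`) are available; treat it as illustrative rather
# than part of this module.
#
#     from transformers import AutoImageProcessor
#     import tensorflow as tf
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted_class = int(tf.math.argmax(logits, axis=-1)[0])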
| 402
| 0
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float):
    # gradient of the normal at (point_x, point_y) on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point; keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    incoming_gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, incoming_gradient = next_point(point_x, point_y, incoming_gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53
|
'''simple docstring'''
_UpperCAmelCase : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
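# Quick round-trip sketch for the two functions above: encoding then decoding is
# the identity, and the encoder agrees with the standard library.
#
#     import base64
#     assert base64_decode(base64_encode(b"Hello World!")) == b"Hello World!"
#     assert base64_encode(b"Hello World!") == base64.b64encode(b"Hello World!")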
| 72
| 0
|
import math
def sieve(n: int):
    '''simple docstring'''
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
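# Hedged cross-check for the segmented sieve above: on a small bound it must
# reproduce the usual prime list.
#
#     assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]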
| 601
|
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
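# Background for check_circuit_or_path above: a connected undirected graph has
# an Euler cycle iff every vertex has even degree (return code 1), an Euler path
# iff exactly two vertices have odd degree (code 2, starting at one of them),
# and neither otherwise (code 3).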
| 601
| 1
|
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
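# Example invocation (the script name and paths are placeholders):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/t5/config.json \
#         --pytorch_dump_path /path/to/pytorch_model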
| 180
|
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list, item):
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
_lowerCAmelCase = input("Enter numbers separated by comma:\n").strip()
_lowerCAmelCase = [int(item.strip()) for item in user_input.split(",")]
_lowerCAmelCase = int(input("Enter the number to be found in the list:\n").strip())
_lowerCAmelCase = "" if binary_search(sequence, target) else "not "
print(F'{target} was {not_str}found in {sequence}')
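# Note on the recursion above: each call halves the search space, so only
# O(log n) comparisons happen, but the slices copy elements, so a large list
# pays O(n) extra copying overall. For example:
#
#     assert binary_search([1, 3, 5, 7, 9], 5)
#     assert not binary_search([1, 3, 5, 7, 9], 4)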
| 180
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
"""Pregnancy""": 16_86_29,
"""Christianity""": 76_75,
"""Explain""": 10_64_23,
"""Fitness""": 6_34_40,
"""Saving""": 6_31_63,
"""Ask""": 2_71_71,
"""Ass""": 9_59_85,
"""Joke""": 16_35_09,
"""Questions""": 4_56_22,
"""Thoughts""": 4_96_05,
"""Retail""": 5_23_42,
"""Feminism""": 16_43_38,
"""Writing""": 1_19_92,
"""Atheism""": 19_22_63,
"""Netflix""": 4_86_16,
"""Computing""": 3_96_39,
"""Opinion""": 4_32_13,
"""Alone""": 4_49_67,
"""Funny""": 5_89_17,
"""Gaming""": 4_03_58,
"""Human""": 40_88,
"""India""": 13_31,
"""Joker""": 7_71_38,
"""Diet""": 3_62_06,
"""Legal""": 1_18_59,
"""Norman""": 49_39,
"""Tip""": 7_26_89,
"""Weight""": 5_23_43,
"""Movies""": 4_62_73,
"""Running""": 2_34_25,
"""Science""": 20_90,
"""Horror""": 3_77_93,
"""Confession""": 6_05_72,
"""Finance""": 1_22_50,
"""Politics""": 1_63_60,
"""Scary""": 19_19_85,
"""Support""": 1_26_54,
"""Technologies""": 3_25_16,
"""Teenage""": 6_61_60,
"""Event""": 3_27_69,
"""Learned""": 6_74_60,
"""Notion""": 18_27_70,
"""Wikipedia""": 3_75_83,
"""Books""": 66_65,
"""Extract""": 7_60_50,
"""Confessions""": 10_27_01,
"""Conspiracy""": 7_59_32,
"""Links""": 6_36_74,
"""Narcissus""": 15_04_25,
"""Relationship""": 5_47_66,
"""Relationships""": 13_47_96,
"""Reviews""": 4_16_71,
"""News""": 42_56,
"""Translation""": 2_68_20,
"""multilingual""": 12_84_06,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
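# Hedged usage sketch (assumes local vocab.json / merges.txt files in the CTRL
# format; the paths are placeholders):
#
#     tokenizer = CTRLTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     tokens = tokenizer.tokenize("Hello world")         # BPE pieces joined with "@@ "
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     text = tokenizer.convert_tokens_to_string(tokens)  # "@@ " joints removed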
| 370
|
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 370
| 1
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline
A_ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
A_ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
A_ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a (self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
__snake_case = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__snake_case = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__snake_case = CLIPTextModel(a__ )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a (self : str , a__ : Union[str, Any] , a__ : Any=0 ):
"""simple docstring"""
if str(a__ ).startswith('''mps''' ):
__snake_case = torch.manual_seed(a__ )
else:
__snake_case = torch.Generator(device=a__ ).manual_seed(a__ )
__snake_case = 2
__snake_case = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=a__ , device=torch.device(a__ ) , )
__snake_case = floats_tensor(control_image.shape , rng=random.Random(a__ ) ).to(a__ )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __snake_case = Image.fromarray(np.uint8(a__ ) ).convert('''RGB''' ).resize((64, 64) )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def a (self : Tuple ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a (self : Optional[Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def a (self : Optional[Any] ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : str = StableDiffusionControlNetImgaImgPipeline
A_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
A_ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Optional[int] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def a (self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
__snake_case = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(a__ )
torch.manual_seed(0 )
__snake_case = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(a__ )
torch.manual_seed(0 )
__snake_case = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__snake_case = CLIPTextModel(a__ )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = MultiControlNetModel([controlneta, controlneta] )
__snake_case = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a (self : int , a__ : List[str] , a__ : Union[str, Any]=0 ):
"""simple docstring"""
if str(a__ ).startswith('''mps''' ):
__snake_case = torch.manual_seed(a__ )
else:
__snake_case = torch.Generator(device=a__ ).manual_seed(a__ )
__snake_case = 2
__snake_case = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=a__ , device=torch.device(a__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=a__ , device=torch.device(a__ ) , ),
]
__snake_case = floats_tensor(control_image[0].shape , rng=random.Random(a__ ) ).to(a__ )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __snake_case = Image.fromarray(np.uint8(a__ ) ).convert('''RGB''' ).resize((64, 64) )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def a (self : str ):
"""simple docstring"""
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a__ )
pipe.to(a__ )
__snake_case = 1_0.0
__snake_case = 4
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = steps
__snake_case = scale
__snake_case = pipe(**a__ )[0]
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = steps
__snake_case = scale
__snake_case = pipe(**a__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = steps
__snake_case = scale
__snake_case = pipe(**a__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = steps
__snake_case = scale
__snake_case = pipe(**a__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def a (self : List[Any] ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a (self : List[Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def a (self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def a (self : int ):
"""simple docstring"""
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(a__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a (self : int ):
"""simple docstring"""
__snake_case = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
__snake_case = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=a__ , controlnet=a__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a__ )
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = '''evil space-punk bird'''
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
__snake_case = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
__snake_case = pipe(
a__ , a__ , control_image=a__ , generator=a__ , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 592
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 592
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
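# The marker format enforced by _re_copy_warning above looks like this (the
# module path is illustrative):
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# Typical invocations:
#     python utils/check_copies.py                      # report stale copies
#     python utils/check_copies.py --fix_and_overwrite  # rewrite them in place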
| 716
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail"""):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [F'''This example is {label}''' for label in labels], return_tensors="""pt""", padding="""max_length""", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
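# Hedged usage sketch (downloads the facebook/bart-large-mnli checkpoint on
# first use; the labels are placeholders):
#
#     classifier = TextClassificationTool()
#     label = classifier("This new movie is awesome", labels=["positive", "negative"])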
| 103
| 0
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling(self):
        directory = Path('src/transformers')
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization(self):
        directory = Path('src/transformers')
        identifier = """tokenization"""
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration(self):
        directory = Path('src/transformers')
        identifier = """configuration"""
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path('src/transformers')
        n_identifier = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_sources(self):
        directory = Path('docs/source')
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 295
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    '''simple docstring'''

    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
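# Hedged usage sketch (the file path is a placeholder):
#
#     reader = TextDatasetReader("my_corpus.txt", split=NamedSplit("train"))
#     dataset = reader.read()  # one "text" column with one line per example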
| 662
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        '''simple docstring'''
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 706
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """simple docstring"""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights", action="store_true", help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ), )
def set_default_quantizers(args):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''')

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """simple docstring"""
    logger.info("Configuring Model for Quantization")
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''')

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """simple docstring"""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''')
def finish_calibration(model, args):
    """simple docstring"""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """simple docstring"""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''')

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''')
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''')
def expand_amax(model):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''')
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCamelCase_ , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCAmelCase__ : List[Any] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCAmelCase__ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
lowerCAmelCase__ : Tuple = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase_ , keepdims=lowerCamelCase_ ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
lowerCAmelCase__ : str = amax
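# Illustrative shape example (not from the original): for a conv weight of shape
# (out, in, kh, kw) quantized per axis 0, axis_set == {0}, reduce_axis == (1, 2, 3),
# and reduce_amax yields one amax per output channel (shape (out, 1, 1, 1) when
# keepdims is kept on, which the obfuscated argument above presumably is).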
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=2_5 , lowerCamelCase_=1_8_0 , lowerCamelCase_=None ):
"""simple docstring"""
if ignore is None:
lowerCAmelCase__ : str = []
elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ : List[Any] = [ignore]
lowerCAmelCase__ : Optional[int] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase_ , "weight" ):
continue
lowerCAmelCase__ : str = max(lowerCamelCase_ , len(lowerCamelCase_ ) )
for name, mod in model.named_modules():
lowerCAmelCase__ : Tuple = getattr(lowerCamelCase_ , "_input_quantizer" , lowerCamelCase_ )
lowerCAmelCase__ : Union[str, Any] = getattr(lowerCamelCase_ , "_weight_quantizer" , lowerCamelCase_ )
if not hasattr(lowerCamelCase_ , "weight" ):
continue
if type(lowerCamelCase_ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase_ ) is str and s in name]:
continue
lowerCAmelCase__ : List[Any] = f'''Act:{input_q.extra_repr()}'''
lowerCAmelCase__ : Optional[int] = f'''Wgt:{weight_q.extra_repr()}'''
lowerCAmelCase__ : int = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCamelCase_ ) <= line_width:
logger.info(lowerCamelCase_ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase_ , lowerCamelCase_ )
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="both" , **lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_input_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_weight_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
logger.info(lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCamelCase_ , "_input_quantizer" ) or hasattr(lowerCamelCase_ , "_weight_quantizer" ):
for n in names:
if re.search(lowerCamelCase_ , lowerCamelCase_ ):
set_quantizers(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ : Optional[Any] = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
logger.info(lowerCamelCase_ )
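# Hedged usage sketch for the helper above (its real name is obfuscated in this
# dump; the regex and keyword are illustrative):
#   set_quantizer_by_name(model, [r"encoder\.layer\.\d+"], _disabled=True)
# would disable both the input and weight quantizers of every matching module.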
| 568
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase(_lowercase ):
__snake_case: jnp.ndarray
__snake_case: jnp.ndarray
class lowercase(nn.Module ):
__snake_case: int
__snake_case: Tuple[int] = (16, 32, 96, 256)
__snake_case: jnp.dtype = jnp.floataa
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a__ = []
for i in range(len(self.block_out_channels ) - 1 ):
a__ = self.block_out_channels[i]
a__ = self.block_out_channels[i + 1]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = blocks
a__ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
a__ = self.conv_in(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
for block in self.blocks:
a__ = block(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
a__ = self.conv_out(__SCREAMING_SNAKE_CASE )
return embedding
@flax_register_to_config
class lowercase(nn.Module , _lowercase , _lowercase ):
__snake_case: int = 32
__snake_case: int = 4
__snake_case: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__snake_case: Union[bool, Tuple[bool]] = False
__snake_case: Tuple[int] = (320, 640, 1280, 1280)
__snake_case: int = 2
__snake_case: Union[int, Tuple[int]] = 8
__snake_case: Optional[Union[int, Tuple[int]]] = None
__snake_case: int = 1280
__snake_case: float = 0.0
__snake_case: bool = False
__snake_case: jnp.dtype = jnp.floataa
__snake_case: bool = True
__snake_case: int = 0
__snake_case: str = "rgb"
__snake_case: Tuple[int] = (16, 32, 96, 256)
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> FrozenDict:
"""simple docstring"""
a__ = (1, self.in_channels, self.sample_size, self.sample_size)
a__ = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
a__ = jnp.ones((1,) , dtype=jnp.intaa )
a__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
a__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
a__ = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
a__ , a__ = jax.random.split(__SCREAMING_SNAKE_CASE )
a__ = {'params': params_rng, 'dropout': dropout_rng}
return self.init(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )["params"]
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = self.block_out_channels
a__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a__ = self.num_attention_heads or self.attention_head_dim
# input
a__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a__ = FlaxTimestepEmbedding(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
a__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
a__ = self.only_cross_attention
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (num_attention_heads,) * len(self.down_block_types )
# down
a__ = []
a__ = []
a__ = block_out_channels[0]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
for i, down_block_type in enumerate(self.down_block_types ):
a__ = output_channel
a__ = block_out_channels[i]
a__ = i == len(__SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a__ = FlaxCrossAttnDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
a__ = FlaxDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__SCREAMING_SNAKE_CASE )
for _ in range(self.layers_per_block ):
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
if not is_final_block:
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
a__ = down_blocks
a__ = controlnet_down_blocks
# mid
a__ = block_out_channels[-1]
a__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
a__ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
a__ = jnp.flip(__SCREAMING_SNAKE_CASE , axis=1 )
# 1. time
if not isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ):
a__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0:
a__ = timesteps.astype(dtype=jnp.floataa )
a__ = jnp.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
a__ = self.time_proj(__SCREAMING_SNAKE_CASE )
a__ = self.time_embedding(__SCREAMING_SNAKE_CASE )
# 2. pre-process
a__ = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
a__ = self.conv_in(__SCREAMING_SNAKE_CASE )
a__ = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
a__ = self.controlnet_cond_embedding(__SCREAMING_SNAKE_CASE )
sample += controlnet_cond
# 3. down
a__ = (sample,)
for down_block in self.down_blocks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ , a__ = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
else:
a__ , a__ = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
a__ = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
        # 5. controlnet blocks
a__ = ()
for down_block_res_sample, controlnet_block in zip(__SCREAMING_SNAKE_CASE , self.controlnet_down_blocks ):
a__ = controlnet_block(__SCREAMING_SNAKE_CASE )
controlnet_down_block_res_samples += (down_block_res_sample,)
a__ = controlnet_down_block_res_samples
a__ = self.controlnet_mid_block(__SCREAMING_SNAKE_CASE )
# 6. scaling
a__ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__SCREAMING_SNAKE_CASE , mid_block_res_sample=__SCREAMING_SNAKE_CASE )
| 273
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase(_lowercase ):
__snake_case: jnp.ndarray
__snake_case: jnp.ndarray
class lowercase(nn.Module ):
__snake_case: int
__snake_case: Tuple[int] = (16, 32, 96, 256)
__snake_case: jnp.dtype = jnp.floataa
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a__ = []
for i in range(len(self.block_out_channels ) - 1 ):
a__ = self.block_out_channels[i]
a__ = self.block_out_channels[i + 1]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = blocks
a__ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
a__ = self.conv_in(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
for block in self.blocks:
a__ = block(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
a__ = self.conv_out(__SCREAMING_SNAKE_CASE )
return embedding
@flax_register_to_config
class lowercase(nn.Module , _lowercase , _lowercase ):
__snake_case: int = 32
__snake_case: int = 4
__snake_case: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__snake_case: Union[bool, Tuple[bool]] = False
__snake_case: Tuple[int] = (320, 640, 1280, 1280)
__snake_case: int = 2
__snake_case: Union[int, Tuple[int]] = 8
__snake_case: Optional[Union[int, Tuple[int]]] = None
__snake_case: int = 1280
__snake_case: float = 0.0
__snake_case: bool = False
__snake_case: jnp.dtype = jnp.floataa
__snake_case: bool = True
__snake_case: int = 0
__snake_case: str = "rgb"
__snake_case: Tuple[int] = (16, 32, 96, 256)
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> FrozenDict:
"""simple docstring"""
a__ = (1, self.in_channels, self.sample_size, self.sample_size)
a__ = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
a__ = jnp.ones((1,) , dtype=jnp.intaa )
a__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
a__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
a__ = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
a__ , a__ = jax.random.split(__SCREAMING_SNAKE_CASE )
a__ = {'params': params_rng, 'dropout': dropout_rng}
return self.init(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )["params"]
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = self.block_out_channels
a__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a__ = self.num_attention_heads or self.attention_head_dim
# input
a__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a__ = FlaxTimestepEmbedding(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
a__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
a__ = self.only_cross_attention
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (num_attention_heads,) * len(self.down_block_types )
# down
a__ = []
a__ = []
a__ = block_out_channels[0]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
for i, down_block_type in enumerate(self.down_block_types ):
a__ = output_channel
a__ = block_out_channels[i]
a__ = i == len(__SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a__ = FlaxCrossAttnDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
a__ = FlaxDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__SCREAMING_SNAKE_CASE )
for _ in range(self.layers_per_block ):
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
if not is_final_block:
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
a__ = down_blocks
a__ = controlnet_down_blocks
# mid
a__ = block_out_channels[-1]
a__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
a__ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
a__ = jnp.flip(__SCREAMING_SNAKE_CASE , axis=1 )
# 1. time
if not isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ):
a__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0:
a__ = timesteps.astype(dtype=jnp.floataa )
a__ = jnp.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
a__ = self.time_proj(__SCREAMING_SNAKE_CASE )
a__ = self.time_embedding(__SCREAMING_SNAKE_CASE )
# 2. pre-process
a__ = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
a__ = self.conv_in(__SCREAMING_SNAKE_CASE )
a__ = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
a__ = self.controlnet_cond_embedding(__SCREAMING_SNAKE_CASE )
sample += controlnet_cond
# 3. down
a__ = (sample,)
for down_block in self.down_blocks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ , a__ = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
else:
a__ , a__ = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
a__ = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
        # 5. controlnet blocks
a__ = ()
for down_block_res_sample, controlnet_block in zip(__SCREAMING_SNAKE_CASE , self.controlnet_down_blocks ):
a__ = controlnet_block(__SCREAMING_SNAKE_CASE )
controlnet_down_block_res_samples += (down_block_res_sample,)
a__ = controlnet_down_block_res_samples
a__ = self.controlnet_mid_block(__SCREAMING_SNAKE_CASE )
# 6. scaling
a__ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__SCREAMING_SNAKE_CASE , mid_block_res_sample=__SCREAMING_SNAKE_CASE )
| 273
| 1
|
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __UpperCamelCase ( unittest.TestCase ):
lowercase : Union[str, Any] = JukeboxTokenizer
lowercase : Union[str, Any] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def a__ ( self :Dict ):
import torch
snake_case_ : int = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
snake_case_ : Optional[Any] = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
snake_case_ : Union[str, Any] = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
@require_torch
def a__ ( self :Optional[int] ):
import torch
snake_case_ : Union[str, Any] = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
snake_case_ : List[Any] = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
snake_case_ : Union[str, Any] = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
| 267
|
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : Dict = TransfoXLTokenizer
lowercase : Optional[Any] = False
lowercase : Dict = False
def a__ ( self :Union[str, Any] ):
super().setUp()
snake_case_ : Optional[int] = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
snake_case_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def a__ ( self :List[Any] ,**_UpperCamelCase :Optional[Any] ):
snake_case_ : Tuple = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**_UpperCamelCase )
def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ):
snake_case_ : Any = """<unk> UNwanted , running"""
snake_case_ : Optional[int] = """<unk> unwanted, running"""
return input_text, output_text
def a__ ( self :Dict ):
snake_case_ : Dict = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=_UpperCamelCase )
snake_case_ : Dict = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(_UpperCamelCase ,["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) ,[0, 4, 8, 7] )
def a__ ( self :Optional[Any] ):
snake_case_ : Dict = TransfoXLTokenizer(lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def a__ ( self :Any ):
snake_case_ : List[Any] = TransfoXLTokenizer(lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def a__ ( self :List[str] ):
snake_case_ : str = TransfoXLTokenizer(lower_case=_UpperCamelCase )
snake_case_ : List[str] = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
snake_case_ : Optional[int] = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(_UpperCamelCase ) ,_UpperCamelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(_UpperCamelCase ) ,_UpperCamelCase )
def a__ ( self :Dict ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Dict = len(_UpperCamelCase )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" ,1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_UpperCamelCase ) ,original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) ,[1] )
self.assertEqual(tokenizer.decode([1] ) ,"""new1""" )
| 267
| 1
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowercase_ : Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
def __init__( self , lowerCAmelCase ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE__: Union[str, Any]= nn.ModuleList(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(lowerCAmelCase , lowerCAmelCase , self.nets ) ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= controlnet(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE__: int= [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCAmelCase , lowerCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , ) -> int:
SCREAMING_SNAKE_CASE__: Optional[int]= 0
SCREAMING_SNAKE_CASE__: Tuple= save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCAmelCase , is_main_process=lowerCAmelCase , save_function=lowerCAmelCase , safe_serialization=lowerCAmelCase , variant=lowerCAmelCase , )
idx += 1
SCREAMING_SNAKE_CASE__: List[Any]= model_path_to_save + f'_{idx}'
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , **lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: Any= 0
SCREAMING_SNAKE_CASE__: Union[str, Any]= []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE__: List[str]= pretrained_model_path
while os.path.isdir(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= ControlNetModel.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
controlnets.append(lowerCAmelCase )
idx += 1
SCREAMING_SNAKE_CASE__: List[Any]= pretrained_model_path + f'_{idx}'
logger.info(f'{len(lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.' )
if len(lowerCAmelCase ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(lowerCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(lowerCAmelCase )
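# Hedged directory-layout sketch for round-tripping (paths are placeholders):
#   save_pretrained("./multi")  writes  ./multi, ./multi_1, ./multi_2, ...
#   from_pretrained("./multi")  scans   ./multi, ./multi_1, ... until a directory is missing.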
| 64
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ : Tuple = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 595
| 0
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowercase__ = get_logger(__name__)
def __snake_case ( lowercase : Tuple , lowercase : int , lowercase : Dict , lowercase : str , lowercase : Tuple=0 ):
os.makedirs(_snake_case , exist_ok=_snake_case )
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
snake_case_ = os.path.join(_snake_case , _snake_case )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(_snake_case , _snake_case )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
snake_case_ = os.path.join(_snake_case , _snake_case )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(_snake_case , _snake_case )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = os.path.join(_snake_case , f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_snake_case , exist_ok=_snake_case )
logger.info(f'''Saving model to {ckpt_dir}''' )
snake_case_ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_snake_case , storage_writer=dist_cp.FileSystemWriter(_snake_case ) , planner=DefaultSavePlanner() , )
logger.info(f'''Model saved to {ckpt_dir}''' )
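# The three branches above mirror FSDP's StateDictType options:
#   FULL_STATE_DICT    -> one consolidated {MODEL_NAME}.bin, written by rank 0 only
#   LOCAL_STATE_DICT   -> one shard per rank, suffixed with the process index
#   SHARDED_STATE_DICT -> a torch.distributed.checkpoint directory written collectively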
def __snake_case ( lowercase : List[str] , lowercase : Any , lowercase : Dict , lowercase : Optional[int] , lowercase : Any=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_snake_case ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
snake_case_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
snake_case_ = os.path.join(_snake_case , _snake_case )
logger.info(f'''Loading model from {input_model_file}''' )
snake_case_ = torch.load(_snake_case )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
snake_case_ = os.path.join(_snake_case , _snake_case )
logger.info(f'''Loading model from {input_model_file}''' )
snake_case_ = torch.load(_snake_case )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = (
os.path.join(_snake_case , f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
snake_case_ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_snake_case , storage_reader=dist_cp.FileSystemReader(_snake_case ) , planner=DefaultLoadPlanner() , )
snake_case_ = state_dict["model"]
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_snake_case )
def __snake_case ( lowercase : Tuple , lowercase : List[Any] , lowercase : Tuple , lowercase : Dict , lowercase : Dict , lowercase : List[Any]=0 ):
os.makedirs(_snake_case , exist_ok=_snake_case )
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ = FSDP.optim_state_dict(_snake_case , _snake_case )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
snake_case_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
snake_case_ = os.path.join(_snake_case , _snake_case )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_snake_case , _snake_case )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
snake_case_ = os.path.join(_snake_case , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_snake_case , exist_ok=_snake_case )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_snake_case ) , planner=DefaultSavePlanner() , )
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def __snake_case ( lowercase : Optional[Any] , lowercase : int , lowercase : List[str] , lowercase : List[Any] , lowercase : int , lowercase : str=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
snake_case_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
snake_case_ = os.path.join(_snake_case , _snake_case )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
snake_case_ = torch.load(_snake_case )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
snake_case_ = (
os.path.join(_snake_case , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
snake_case_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_snake_case ) , )
snake_case_ = optim_state["optimizer"]
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
snake_case_ = FSDP.optim_state_dict_to_load(_snake_case , _snake_case , _snake_case )
optimizer.load_state_dict(_snake_case )
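# Hedged usage sketch (the save/load helpers above appear with obfuscated names in
# this dump; the argument objects are placeholders):
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir)
# Both calls must run under the same fsdp_plugin.state_dict_type used at save time.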
| 708
|
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( lowercase : str , lowercase : str , lowercase : str ):
def get_masked_lm_array(lowercase : str ):
snake_case_ = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case_ = tf.train.load_variable(lowercase , lowercase )
if "kernel" in name:
snake_case_ = array.transpose()
return torch.from_numpy(lowercase )
def get_encoder_array(lowercase : str ):
snake_case_ = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case_ = tf.train.load_variable(lowercase , lowercase )
if "kernel" in name:
snake_case_ = array.transpose()
return torch.from_numpy(lowercase )
def get_encoder_layer_array(lowercase : int , lowercase : str ):
snake_case_ = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case_ = tf.train.load_variable(lowercase , lowercase )
if "kernel" in name:
snake_case_ = array.transpose()
return torch.from_numpy(lowercase )
def get_encoder_attention_layer_array(lowercase : int , lowercase : str , lowercase : int ):
snake_case_ = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case_ = tf.train.load_variable(lowercase , lowercase )
snake_case_ = array.reshape(lowercase )
if "kernel" in name:
snake_case_ = array.transpose()
return torch.from_numpy(lowercase )
print(f'''Loading model based on config from {config_path}...''' )
snake_case_ = BertConfig.from_json_file(lowercase )
snake_case_ = BertForMaskedLM(lowercase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
snake_case_ = model.bert.encoder.layer[layer_index]
# Self-attention
snake_case_ = layer.attention.self
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_query_dense/kernel" , self_attn.query.weight.data.shape )
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_query_dense/bias" , self_attn.query.bias.data.shape )
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_key_dense/kernel" , self_attn.key.weight.data.shape )
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_key_dense/bias" , self_attn.key.bias.data.shape )
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_value_dense/kernel" , self_attn.value.weight.data.shape )
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_value_dense/bias" , self_attn.value.bias.data.shape )
# Self-attention Output
snake_case_ = layer.attention.output
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_output_dense/kernel" , self_output.dense.weight.data.shape )
snake_case_ = get_encoder_attention_layer_array(
lowercase , "_output_dense/bias" , self_output.dense.bias.data.shape )
snake_case_ = get_encoder_layer_array(lowercase , "_attention_layer_norm/gamma" )
snake_case_ = get_encoder_layer_array(lowercase , "_attention_layer_norm/beta" )
# Intermediate
snake_case_ = layer.intermediate
snake_case_ = get_encoder_layer_array(lowercase , "_intermediate_dense/kernel" )
snake_case_ = get_encoder_layer_array(lowercase , "_intermediate_dense/bias" )
# Output
snake_case_ = layer.output
snake_case_ = get_encoder_layer_array(lowercase , "_output_dense/kernel" )
snake_case_ = get_encoder_layer_array(lowercase , "_output_dense/bias" )
snake_case_ = get_encoder_layer_array(lowercase , "_output_layer_norm/gamma" )
snake_case_ = get_encoder_layer_array(lowercase , "_output_layer_norm/beta" )
# Embeddings
snake_case_ = get_encoder_array("_position_embedding_layer/embeddings" )
snake_case_ = get_encoder_array("_type_embedding_layer/embeddings" )
snake_case_ = get_encoder_array("_embedding_norm_layer/gamma" )
snake_case_ = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
snake_case_ = model.cls.predictions.transform
snake_case_ = get_masked_lm_array("dense/kernel" )
snake_case_ = get_masked_lm_array("dense/bias" )
snake_case_ = get_masked_lm_array("layer_norm/gamma" )
snake_case_ = get_masked_lm_array("layer_norm/beta" )
snake_case_ = get_masked_lm_array("embedding_table" )
# Pooling
snake_case_ = BertPooler(config=lowercase )
snake_case_ = get_encoder_array("_pooler_layer/kernel" )
snake_case_ = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(lowercase )
# Integration test - should load without any errors ;)
snake_case_ = BertForMaskedLM.from_pretrained(lowercase )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowercase__ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
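    # Hedged example invocation (the script filename and paths are placeholders):
    #   python convert_token_dropping_checkpoint.py --tf_checkpoint_path ./tf_ckpt \
    #       --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_out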
| 420
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Any = logging.get_logger(__name__)
class UpperCamelCase__ ( __lowercase ):
"""simple docstring"""
__magic_name__ = ["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : Tuple = size if size is not None else {'''shortest_edge''': 384}
_lowerCAmelCase : Optional[Any] = get_size_dict(_A , default_to_square=_A )
_lowerCAmelCase : Union[str, Any] = do_resize
_lowerCAmelCase : Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
_lowerCAmelCase : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
_lowerCAmelCase : Optional[Any] = resample
_lowerCAmelCase : Optional[int] = do_rescale
_lowerCAmelCase : int = rescale_factor
_lowerCAmelCase : str = do_normalize
_lowerCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : int = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
_lowerCAmelCase : Union[str, Any] = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_lowerCAmelCase : Tuple = int(shortest_edge / crop_pct )
_lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
_lowerCAmelCase : Optional[Any] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
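    # Worked example for the crop_pct branch above (numbers illustrative): with
    # shortest_edge=224 and crop_pct=0.875, the shortest side is resized to
    # int(224 / 0.875) == 256 and the result is center-cropped to (224, 224);
    # at shortest_edge >= 384 the image is instead warped directly, with no crop.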
def a ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Union[str, Any] = crop_pct if crop_pct is not None else self.crop_pct
_lowerCAmelCase : str = resample if resample is not None else self.resample
_lowerCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Tuple = image_std if image_std is not None else self.image_std
_lowerCAmelCase : int = size if size is not None else self.size
_lowerCAmelCase : List[str] = get_size_dict(_A , default_to_square=_A )
_lowerCAmelCase : Optional[int] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase : Union[str, Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
_lowerCAmelCase : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images]
if do_rescale:
_lowerCAmelCase : Dict = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_lowerCAmelCase : List[str] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_lowerCAmelCase : str = [to_channel_dimension_format(_A , _A ) for image in images]
_lowerCAmelCase : Any = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
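# A minimal usage sketch. Assumption: this is the ConvNeXT-style image
# processor, so the class name below is illustrative rather than confirmed by
# this file.
# from PIL import Image
# processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
# inputs = processor.preprocess(Image.open("cat.png"), return_tensors="pt")
# inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224])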
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
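# A short usage sketch: building the config and inspecting the ONNX export
# metadata defined above.
# config = SegformerConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
# onnx_config = SegformerOnnxConfig(config)
# list(onnx_config.inputs)        # ["pixel_values"]
# onnx_config.default_onnx_opset  # 12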
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers=0,
        crop_overlap_ratio=512 / 1500,
        points_per_crop=32,
        crop_n_points_downscale_factor=1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in at most one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly merge adjacent implicants; the terms that never merge are prime."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the pair merges, so neither term is prime on its own
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # pick essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
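# A worked example, assuming three variables and minterms {1, 3, 7}:
# decimal_to_binary(3, [1, 3, 7]) -> ["001", "011", "111"]
# check(["001", "011", "111"])    -> the prime implicants {"0_1", "_11"}
#   (001/011 merge into "0_1" and 011/111 merge into "_11")
# Both are essential here: only "0_1" covers minterm 1 and only "_11" covers
# minterm 7, so selection() returns both.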
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")
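# A minimal matching server sketch (an assumption for illustration, not part
# of the original file): it binds the same host/port, streams a file back,
# then closes so the client's recv() returns b"" and its loop ends.
def example_server(filename="File_to_send"):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1024)  # consume the b"Hello server!" greeting
    with open(filename, "rb") as in_file:
        conn.sendfile(in_file)
    conn.close()
    server.close()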
if __name__ == "__main__":
main()
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
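    # The two queries above evaluate to 11 (path 1 -> 3 -> 4, cost 5 + 6) and
    # 16 (path 0 -> 2 -> 3, cost 9 + 7); wrap them in print() to see the values.
    # Note that dp[i][i] starts at math.inf rather than 0, so initialize the
    # diagonal first if self-distances matter.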
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
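# A short usage sketch: the defaults above reproduce deberta-v2-xlarge-sized
# hyperparameters; the backwards-compatibility branch also accepts a
# pipe-separated string for pos_att_type.
# config = DebertaV2Config(relative_attention=True, pos_att_type="p2c|c2p")
# config.pos_att_type  # ["p2c", "c2p"]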
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[Any] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Any ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : str , **lowerCamelCase_ : List[str] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = ['''torch''']
def __init__( self : int , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Tuple ) ->Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Any , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : int ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : int = ['''torch''']
def __init__( self : Optional[int] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : List[str] ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Optional[Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : str , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Union[str, Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''torch''']
def __init__( self : Dict , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Any , *lowerCamelCase_ : str , **lowerCamelCase_ : int ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''torch''']
def __init__( self : str , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Any ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Dict ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : str , **lowerCamelCase_ : str ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = ['''torch''']
def __init__( self : Tuple , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Optional[int] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Optional[int] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''torch''']
def __init__( self : List[Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : Tuple ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : int ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''torch''']
def __init__( self : Any , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Dict ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : int ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ['''torch''']
def __init__( self : Any , *lowerCamelCase_ : Any , **lowerCamelCase_ : int ) ->Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : str , *lowerCamelCase_ : Any , **lowerCamelCase_ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Optional[Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ['''torch''']
def __init__( self : Optional[Any] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Tuple ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Dict ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Union[str, Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Any = ['''torch''']
def __init__( self : int , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Dict ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
def lowercase ( *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(__lowerCAmelCase , ["""torch"""] )
def lowercase ( *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(__lowerCAmelCase , ["""torch"""] )
def lowercase ( *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(__lowerCAmelCase , ["""torch"""] )
def lowercase ( *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(__lowerCAmelCase , ["""torch"""] )
def lowercase ( *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(__lowerCAmelCase , ["""torch"""] )
def lowercase ( *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(__lowerCAmelCase , ["""torch"""] )
def lowercase ( *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(__lowerCAmelCase , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''torch''']
def __init__( self : int , *lowerCamelCase_ : Any , **lowerCamelCase_ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : int ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Any ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Optional[int] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Dict ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Any = ['''torch''']
def __init__( self : List[Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : int ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : Any , **lowerCamelCase_ : Dict ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ['''torch''']
def __init__( self : Optional[Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Optional[int] ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : List[str] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = ['''torch''']
def __init__( self : List[Any] , *lowerCamelCase_ : Any , **lowerCamelCase_ : List[Any] ) ->int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Optional[int] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Tuple = ['''torch''']
def __init__( self : int , *lowerCamelCase_ : Any , **lowerCamelCase_ : Tuple ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : int , **lowerCamelCase_ : Optional[int] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Any = ['''torch''']
def __init__( self : str , *lowerCamelCase_ : str , **lowerCamelCase_ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : str ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Dict ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Dict = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Any , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[Any] ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Tuple ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = ['''torch''']
def __init__( self : List[Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : str , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Any ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Tuple = ['''torch''']
def __init__( self : Tuple , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Any ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Tuple ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Dict ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Tuple ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : int ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : List[str] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Union[str, Any] ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''torch''']
def __init__( self : int , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Optional[Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : Any , **lowerCamelCase_ : List[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = ['''torch''']
def __init__( self : Optional[Any] , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[str] ) ->Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Dict ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Any , **lowerCamelCase_ : Dict ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : int = ['''torch''']
def __init__( self : str , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : int ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : str , *lowerCamelCase_ : Dict , **lowerCamelCase_ : List[str] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''torch''']
def __init__( self : List[Any] , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Optional[int] ) ->int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[int] ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Optional[Any] ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''torch''']
def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Any ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Any , **lowerCamelCase_ : List[str] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : List[Any] ) ->str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : str , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : int , **lowerCamelCase_ : Tuple ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''torch''']
def __init__( self : Tuple , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Any ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : Optional[Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : int , **lowerCamelCase_ : Dict ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Dict = ['''torch''']
def __init__( self : Tuple , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ) ->Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : List[str] ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Any , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[str] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''torch''']
def __init__( self : Union[str, Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : int ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : Any ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : int , **lowerCamelCase_ : Optional[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ['''torch''']
def __init__( self : Any , *lowerCamelCase_ : int , **lowerCamelCase_ : str ) ->Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : int , **lowerCamelCase_ : Tuple ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : str ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''torch''']
def __init__( self : Dict , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ) ->int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Dict ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : int , **lowerCamelCase_ : Any ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Union[str, Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = ['''torch''']
def __init__( self : str , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[int] ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : int = ['''torch''']
def __init__( self : Optional[int] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Any ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Any , *lowerCamelCase_ : Any , **lowerCamelCase_ : int ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ['''torch''']
def __init__( self : Any , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Any ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Dict = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : int ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : int , **lowerCamelCase_ : Optional[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : int , **lowerCamelCase_ : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Any = ['''torch''']
def __init__( self : Dict , *lowerCamelCase_ : Any , **lowerCamelCase_ : Any ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : str , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : str ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] , *lowerCamelCase_ : str , **lowerCamelCase_ : Any ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Tuple = ['''torch''']
def __init__( self : Tuple , *lowerCamelCase_ : int , **lowerCamelCase_ : List[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Union[str, Any] ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Any = ['''torch''']
def __init__( self : Dict , *lowerCamelCase_ : str , **lowerCamelCase_ : str ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Any , **lowerCamelCase_ : Tuple ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[int] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ['''torch''']
def __init__( self : Any , *lowerCamelCase_ : str , **lowerCamelCase_ : Dict ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Optional[int] ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : int = ['''torch''']
def __init__( self : List[str] , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Dict ) ->Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : int ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : Optional[int] ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ['''torch''']
def __init__( self : List[Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Any ) ->List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : str ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = ['''torch''']
def __init__( self : List[Any] , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Optional[int] ) ->List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[Any] , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Tuple ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : int , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[str] ) ->int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''torch''']
def __init__( self : Dict , *lowerCamelCase_ : int , **lowerCamelCase_ : Dict ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Optional[int] ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Tuple , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ['''torch''']
def __init__( self : str , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[Any] ) ->str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : List[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = ['''torch''']
def __init__( self : int , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Union[str, Any] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Any , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class snake_case ( metaclass=A__ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''torch''']
def __init__( self : int , *lowerCamelCase_ : int , **lowerCamelCase_ : Tuple ) ->int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls : List[str] , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receives tokens, decodes them, and emits them as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Puts the new text in the queue. If the stream is ending, also puts a stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
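# A short usage sketch (the checkpoint name is illustrative): TextStreamer
# prints tokens as they are generated, while TextIteratorStreamer hands them
# to a consuming thread through its queue.
# from threading import Thread
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tok = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tok(["A queue-based streamer"], return_tensors="pt")
# streamer = TextIteratorStreamer(tok, skip_prompt=True)
# thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
# thread.start()
# for chunk in streamer:
#     print(chunk, end="")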
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string into camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
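# Illustrative calls for the converter above (added for clarity; the function
# keeps the snippet's obfuscated name `UpperCAmelCase`):
# >>> UpperCAmelCase("some_random_string")
# 'someRandomString'
# >>> UpperCAmelCase("some_random_string", use_pascal=True)
# 'SomeRandomString'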
| 536
|
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VideoClassificationPipeline(Pipeline ):
    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'decord' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['frame_sampling_rate'] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['num_frames'] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self , videos , **kwargs ):
        return super().__call__(videos , **kwargs )

    def preprocess(self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://' ) or video.startswith('https://' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs

    def _forward(self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
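# A minimal usage sketch for the pipeline above (added for illustration):
# the model id is an assumption -- any Hub checkpoint compatible with
# video classification works -- and decord must be installed.
# from transformers import pipeline
# classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
# predictions = classifier("path/or/url/to/video.mp4", top_k=3)
# # -> [{"score": ..., "label": ...}, ...]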
| 536
| 1
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            """simple docstring"""
            pass
def hashimage(image: Image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        """simple docstring"""
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self , depth_estimator , examples ):
        """simple docstring"""
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , outputs )
        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        outputs = depth_estimator(
            [
                Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                # RGBA
                dataset[0]['file'],
                # LA
                dataset[1]['file'],
                # L
                dataset[2]['file'],
            ] )
        self.assertEqual(
            [
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf(self ):
        """simple docstring"""
        pass
@slow
@require_torch
    def test_large_model_pt(self ):
        """simple docstring"""
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self ):
        """simple docstring"""
        self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 22
|
'''simple docstring'''
import requests
def send_slack_message(message_body: str , slack_url: str ):
    '''simple docstring'''
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22
| 1
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self ):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self , **kwargs ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def _lowerCAmelCase ( self : Optional[int] ):
pass
| 252
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin ):
    """simple docstring"""

    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'

    def __init__(self , image_processor , feature_extractor ):
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs ):
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.' )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict

    @property
    def model_input_names(self ):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
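# A minimal usage sketch (added for illustration): the checkpoint id
# "ZinengTang/tvlt-base" is an assumption, and the dummy shapes below only
# indicate the expected kinds of inputs, not real preprocessing settings.
# import numpy as np
# from transformers import TvltProcessor
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
# inputs = processor(images=np.zeros((8, 3, 224, 224)), audio=np.zeros(10000), sampling_rate=44100)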
| 252
| 1
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_snake_case : Dict = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
    """simple docstring"""

    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to SortishSamler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor: bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout: Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler: Optional[str] = field(
        default="linear" , metadata={"help": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
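# A minimal instantiation sketch (added for illustration): only a handful of
# the fields above are set; everything else keeps its default.
# args = Seq2SeqTrainingArguments(
#     output_dir="outputs",
#     label_smoothing=0.1,
#     predict_with_generate=True,
#     lr_scheduler="linear",
# )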
| 81
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
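# Consumer-side sketch of the lazy module above (added for illustration):
# with `_LazyModule`, importing the package is cheap; heavy dependencies such
# as torch are only imported when the corresponding attribute is accessed.
# from transformers.models.yolos import YolosConfig              # config only
# from transformers.models.yolos import YolosForObjectDetection  # triggers torch import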
| 165
| 0
|
import math
def jump_search(arr: list , x: int ) -> int:
    '''simple docstring'''
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f'Number {x} is at index {res}')
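# Worked example (added for illustration): with n = 10 the block size is
# floor(sqrt(10)) = 3, so the search probes arr[2], arr[5], arr[8], overshoots
# at arr[8] = 21 > 13, then scans linearly from index 6.
# >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34], 13)
# 7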
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( _lowercase ):
"""simple docstring"""
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
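# A minimal usage sketch (added for illustration): builds a default ViT-MAE
# configuration, then a variant with a smaller masking ratio.
# from transformers import ViTMAEConfig, ViTMAEModel
# configuration = ViTMAEConfig()
# model = ViTMAEModel(configuration)
# small_mask_config = ViTMAEConfig(mask_ratio=0.5)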
| 648
| 1
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset , length ):
    '''simple docstring'''
    for i in range(length ):
        _ = dataset[i]


@get_duration
def read_batch(dataset , length , batch_size ):
    '''simple docstring'''
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset , length , type ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset , length , batch_size , type ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    '''simple docstring'''
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
    functions_shuffled = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , "dataset.arrow" ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print("shuffling dataset" )
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(kwargs ) )
            times["shuffled " + func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )
        with open(RESULTS_FILE_PATH , "wb" ) as f:
            f.write(json.dumps(times ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 606
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
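# Loading sketch (added for illustration): once uploaded under the hub id in
# the header comment, the tiny checkpoint loads like any other model.
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")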
| 606
| 1
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object ):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self ):
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(
        self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm(
        self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering(
        self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification(
        self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_token_classification(
        self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice(
        self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self ):
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding(self ):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 703
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def run(self ):
        raise NotImplementedError()
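# A minimal concrete command sketch (added for illustration; the subcommand
# name and behaviour are hypothetical, and `parser` here is assumed to be the
# subparsers action that the CLI entry point passes in):
# class EchoCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         echo = parser.add_parser("echo")
#         echo.add_argument("text")
#         echo.set_defaults(func=lambda args: EchoCommand(args.text))
#
#     def __init__(self, text):
#         self.text = text
#
#     def run(self):
#         print(self.text)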
| 103
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''' )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )

    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self ):
        '''simple docstring'''
        tokenizer = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text )
        print(tokens )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 238
|
'''simple docstring'''
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name , config_path , checkpoint_path , model_dump_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
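# Example invocation (added for illustration; the script filename and all
# paths are placeholders, but the flags match the argparse definitions above):
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model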
| 620
| 0
|
'''simple docstring'''
from manim import *
class Stage5(Scene ):
    """simple docstring"""

    def construct(self ):
        '''simple docstring'''
        mem = Rectangle(height=0.5 , width=0.5 )
        fill = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        meta_mem = Rectangle(height=0.25 , width=0.25 )
        cpu_left_col_base = [mem.copy() for i in range(6 )]
        cpu_right_col_base = [mem.copy() for i in range(6 )]
        cpu_left_col = VGroup(*cpu_left_col_base ).arrange(UP , buff=0 )
        cpu_right_col = VGroup(*cpu_right_col_base ).arrange(UP , buff=0 )
        cpu_rects = VGroup(cpu_left_col , cpu_right_col ).arrange(RIGHT , buff=0 )
        cpu_text = Text('''CPU''' , font_size=24 )
        cpu = Group(cpu_rects , cpu_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(cpu )
        gpu_base = [mem.copy() for i in range(4 )]
        gpu_rect = VGroup(*gpu_base ).arrange(UP , buff=0 )
        gpu_text = Text('''GPU''' , font_size=24 )
        gpu = Group(gpu_rect , gpu_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        gpu.move_to([-1, -1, 0] )
        self.add(gpu )
        model_base = [mem.copy() for i in range(6 )]
        model_rect = VGroup(*model_base ).arrange(UP , buff=0 )
        model_text = Text('''Model''' , font_size=24 )
        model = Group(model_rect , model_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        model.move_to([3, -1.0, 0] )
        self.add(model )
        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base ):
            target = fill.copy().set_fill(BLUE , opacity=0.8 )
            target.move_to(rect )
            model_arr.append(target )
            cpu_target = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(BLUE , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(cpu_target )
        self.add(*model_arr , *model_cpu_arr )
        disk_left_col_base = [meta_mem.copy() for i in range(6 )]
        disk_right_col_base = [meta_mem.copy() for i in range(6 )]
        disk_left_col = VGroup(*disk_left_col_base ).arrange(UP , buff=0 )
        disk_right_col = VGroup(*disk_right_col_base ).arrange(UP , buff=0 )
        disk_rects = VGroup(disk_left_col , disk_right_col ).arrange(RIGHT , buff=0 )
        disk_text = Text('''Disk''' , font_size=24 )
        disk = Group(disk_rects , disk_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        disk.move_to([-4, -1.25, 0] )
        self.add(disk )
        key = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(key_text , key )
        blue_text = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(key_text , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(blue_text )
        step_1 = MarkupText(
            f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
        step_1.move_to([2, 2, 0] )
        self.play(Write(step_1 ) )
        input = Square(0.3 )
        input.set_fill(YELLOW , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , LEFT , buff=0.5 )
        self.play(Write(input ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=LEFT , buff=0.02 )
        self.play(MoveToTarget(input ) )
        self.play(FadeOut(step_1 ) )
        a = Arrow(start=UP , end=DOWN , color=RED , buff=0.5 )
        a.next_to(model_arr[0].get_left() , UP , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        step_2 = MarkupText(
            f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
        step_2.move_to([2, 2, 0] )
        self.play(Write(step_2 , run_time=3 ) )
        circ_kwargs = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
        self.play(
            Write(a ) , Circumscribe(model_arr[0] , color=ORANGE , **circ_kwargs ) , Circumscribe(model_cpu_arr[0] , color=ORANGE , **circ_kwargs ) , Circumscribe(gpu_rect[0] , color=ORANGE , **circ_kwargs ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        a_c = a.copy()
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , UP , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            grp = AnimationGroup(
                FadeOut(a , run_time=0.5 ) , MoveToTarget(input , run_time=0.5 ) , FadeIn(a_c , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(grp )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    circ_kwargs['''run_time'''] = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **circ_kwargs ) , Circumscribe(cpu_left_col_base[i] , **circ_kwargs ) , Circumscribe(cpu_left_col_base[i + 1] , color=ORANGE , **circ_kwargs ) , Circumscribe(gpu_rect[0] , color=ORANGE , **circ_kwargs ) , Circumscribe(model_arr[i + 1] , color=ORANGE , **circ_kwargs ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=ORANGE , **circ_kwargs ) , Circumscribe(cpu_left_col_base[-1] , color=ORANGE , **circ_kwargs ) , Circumscribe(gpu_rect[0] , color=ORANGE , **circ_kwargs ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        a = a_c
        a_c = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(step_2 ) , FadeOut(a_c , run_time=0.5 ) , )
        step_3 = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
        step_3.move_to([2, 2, 0] )
        self.play(Write(step_3 , run_time=3 ) , MoveToTarget(input ) )
        self.wait()
| 706
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head(
        self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        prediction_scores = model(inputs )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def create_and_check_for_masked_lm(
        self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification(
        self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice(
        self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification(
        self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering(
        self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )

    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_causal_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )

    def test_for_multiple_choice(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        model = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
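
# A hedged numeric cross-check (not part of the original tests): each row of the
# sinusoidal table holds sin(pos / 10000 ** (2 * i / dim)) in the first half of the
# embedding and the matching cosines in the second half, which is where values such
# as 0.8415 = sin(1) and 0.5403 = cos(1) above come from. `_dim` and `_row` are
# illustrative names, not symbols from this file.
import math

_dim = 6
_row = [math.sin(1 / 10000 ** (2 * i / _dim)) for i in range(_dim // 2)] + [
    math.cos(1 / 10000 ** (2 * i / _dim)) for i in range(_dim // 2)
]
assert [round(x, 4) for x in _row] == [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]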
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2 x 12 x 16 x 64 query and key tensors filled with evenly spaced values
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
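
# A hedged NumPy sketch (illustrative names, not the library's implementation) of the
# rotary trick the assertions above exercise: each even/odd pair of hidden dimensions
# is rotated in 2D by a position-dependent angle, which is how position information is
# mixed into the query and key tensors before attention.
import numpy as np

def rotary_sketch(x: np.ndarray, angles: np.ndarray) -> np.ndarray:
    """x: (seq, dim); angles: (seq, dim // 2). Rotate each (x[2i], x[2i+1]) pair."""
    cos, sin = np.cos(angles), np.sin(angles)
    even, odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = even * cos - odd * sin  # rotated first coordinate of each pair
    out[..., 1::2] = even * sin + odd * cos  # rotated second coordinate of each pair
    return out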
| 8
| 0
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
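
# The expression under the square root above is the dimensionless Friedmann function
#   E(z)^2 = Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda,
# so the function returns H(z) = H0 * E(z). A hedged sanity check (not part of the
# original module): at redshift 0 every (1+z) factor is 1 and the three densities
# plus curvature sum to 1 by construction, so E(0) = 1 and H(0) equals H0:
#
#     assert abs(hubble_parameter(68.3, 1e-4, 0.3, 0.7, 0) - 68.3) < 1e-6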
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 590
|
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", class_="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
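    # Hedged read-back sketch (not part of the original script): print the ten
    # highest-rated titles from the CSV just written, using only the stdlib.
    with open("IMDb_Top_250_Movies.csv", newline="") as in_file:
        rows = list(csv.DictReader(in_file))
    for row in sorted(rows, key=lambda r: float(r["IMDb rating"]), reverse=True)[:10]:
        print(row["Movie title"], row["IMDb rating"])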
| 590
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
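
# A hedged note on the pattern above (not part of the module): installing a
# _LazyModule into sys.modules means the torch-backed symbols registered in
# _import_structure are only imported when first accessed, so, assuming the
# usual transformers package layout, a config-only import stays cheap:
#
#     from transformers.models.jukebox import JukeboxConfig  # no torch import yet
#     from transformers.models.jukebox import JukeboxModel   # triggers modeling import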
| 717
|
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the longest side is strictly shorter than the sum of the
    remaining sides, i.e. the given side lengths can form a polygon."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
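    # Hedged usage examples (illustrative side lengths, not from the original file):
    assert check_polygon([6, 10, 5]) is True  # 10 < 6 + 5
    assert check_polygon([3, 7, 13, 2]) is False  # 13 is not < 3 + 7 + 2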
| 224
| 0
|