import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
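
# For orientation (a sketch, not part of the test file): outside the test harness,
# a DDPMScheduler drives a denoising loop like the one below. The random tensor
# stands in for a UNet's noise prediction; everything else is the public API.
#
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     residual = torch.randn_like(sample)  # stand-in for model(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample
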
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the
# same set.
def check_bipartite_dfs(graph):
    """Check whether ``graph`` (an adjacency list) is bipartite via DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
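
# Quick negative check (added as an illustration, not part of the original sample):
# a triangle is an odd cycle, the canonical non-bipartite graph.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False: an odd cycle cannot be 2-colored
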
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
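
# Minimal usage sketch (hedged: assumes network access and the public
# Salesforce/blip-image-captioning-base checkpoint; "photo.jpg" is a placeholder):
# from PIL import Image
# from transformers import BlipProcessor
#
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
# print(list(inputs.keys()))  # pixel_values plus the usual tokenizer outputs
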
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """
    Greedy fractional knapsack: take items in decreasing order of value/weight
    ratio, splitting the last item if it does not fully fit.

    >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def is_num_palindrome(num: int) -> bool:
    """
    Return True if ``num`` reads the same forwards and backwards.

    >>> is_num_palindrome(121)
    True
    >>> is_num_palindrome(123)
    False
    >>> is_num_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Fine-tuning a multiple-choice model (e.g. on the SWAG dataset)."""

import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads a batch of flattened multiple-choice inputs."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
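
# A typical invocation (sketch; the flags are standard HF example-script /
# TrainingArguments arguments, but treat the exact values as placeholders):
# python run_swag.py \
#   --model_name_or_path bert-base-uncased \
#   --do_train --do_eval \
#   --per_device_train_batch_size 16 \
#   --learning_rate 5e-5 \
#   --num_train_epochs 3 \
#   --output_dir /tmp/swag_output
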
"""Convert a fairseq HuBERT checkpoint to the Hugging Face format."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
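
# Example invocation (sketch; the checkpoint path and output directory are
# placeholders, the flags are the ones defined by the argparse block above):
# python convert_hubert_checkpoint.py \
#   --checkpoint_path ./hubert_base_ls960.pt \
#   --pytorch_dump_folder_path ./hubert-base \
#   --not_finetuned
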
"""Utility to prepare 🤗 Transformers version bumps for a release."""

import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a registered pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
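
# Typical flow, as a sketch of how the entry points above are driven:
# python release.py                # before tagging: bump to the release version
# python release.py --patch        # same, but for a patch release
# python release.py --post_release # after tagging: move back to a .dev0 version
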
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
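
# Usage sketch (hedged: these benchmarking utilities are deprecated upstream, and
# `is_tpu` is supplied by the framework-specific subclasses, so in practice one
# would instantiate e.g. PyTorchBenchmarkArguments rather than this base class):
# args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
# print(args.to_json_string())
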
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to produce a post-order of the (original) graph."""

    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)

    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search on the reversed graph to collect one strongly connected component."""

    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: a post-order pass on the graph, then DFS on its reverse."""

    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
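
# Sanity check on the first sample graph, traced by hand: 0 -> 2 -> 1 -> 0 forms a
# cycle, while 3 and 4 are their own components (added as an illustration):
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
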
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Vertex of a weighted, undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a plain list as the priority queue (O(V^2))."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap as the priority queue (O(E log V))."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
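
# Small smoke test (illustrative sketch, not part of the original module). Build a
# triangle with one heavy edge; Prim should keep the two light edges:
#
# graph = [Vertex(i) for i in range(3)]
# connect(graph, 1, 2, 1)
# connect(graph, 2, 3, 2)
# connect(graph, 1, 3, 9)
# print(prim(graph, graph[0]))  # [(2, 1), (3, 2)]: (vertex, its MST parent), 1-indexed
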
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Read raw audio data (any ffmpeg-supported container) into a float32 numpy array.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Stream raw microphone data via ffmpeg, using the platform's default capture device.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Like `ffmpeg_microphone`, but yields overlapping, striding chunks ready for chunked inference.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Read raw bytes from an iterator and re-emit them in chunks of `chunk_len` with `stride` overlap.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
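
# Usage sketch (hedged: assumes an ffmpeg binary on PATH and a local audio file;
# "speech.flac" is a placeholder):
# with open("speech.flac", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16000)
# print(audio.shape, audio.dtype)  # 1-D float32 PCM resampled to 16 kHz mono
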
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """
    Return a decimal number in its simplest fraction form as (numerator, denominator).
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor, found with the Euclidean algorithm.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator // divisor, denominator // divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError: not a valid number
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """
    Count candidate Lychrel numbers below ``limit``: numbers that do not form a
    palindrome within 50 reverse-and-add iterations (Project Euler problem 55).
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
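# Reusable sketch of the generator pattern used in get_dummy_inputs above (assumes torch):
# MPS does not support device-bound generators, hence the global-seed fallback.
#
#   def make_generator(device, seed=0):
#       if str(device).startswith("mps"):
#           return torch.manual_seed(seed)
#       return torch.Generator(device=device).manual_seed(seed)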
| 302
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
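# Example (illustrative): for "focalnet-small-lrf" the branches above yield
# depths [2, 2, 18, 2], focal_levels [3, 3, 3, 3], focal_windows [3, 3, 3, 3],
# and embed_dim 96; label files are fetched from the hub, so this needs network access:
#   config = get_focalnet_config("focalnet-small-lrf")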
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights to our FocalNet structure."""
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
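# Example invocation (illustrative; the script filename is assumed):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub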
| 302
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
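    # Note (illustrative): RagTokenizer simply pairs the DPR question-encoder tokenizer
    # with the BART generator tokenizer; calling `tokenizer(input_strings)` above
    # tokenizes with the question encoder by default.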
| 84
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
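# Usage sketch (illustrative):
#   config = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
#   config.pooler_hidden_size  # -> 128: falls back to hidden_size via kwargs.get above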
| 165
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
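# Usage sketch (illustrative): the hybrid variant wires up a BiT backbone automatically.
#   config = DPTConfig(is_hybrid=True)
#   isinstance(config.backbone_config, BitConfig)  # -> True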
| 174
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
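# Minimal standalone run (illustrative; mirrors the arguments used throughout the tests):
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, sequence_lengths=[8],
#       batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()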
| 174
| 1
|
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line: str) -> str:
    """Return the indentation of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so comparisons ignore case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` following the rules of isort: constants, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; with `check_only=True`, just report."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
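# Example invocations (illustrative; the in-repo script path is assumed):
#   python utils/custom_init_isort.py --check_only   # only report unsorted blocks
#   python utils/custom_init_isort.py                # rewrite offending __init__.py files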
| 96
|
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using math.sqrt (subject to
    floating-point rounding for very large inputs).
    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using integer-only binary search.
    Time complexity: O(log n), space complexity: O(1).
    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(15)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
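# Integer-exact alternative (sketch): math.isqrt avoids the float round-trip of the
# sqrt-based check above and agrees with the binary search for arbitrarily large ints.
def perfect_square_isqrt(num: int) -> bool:
    return num >= 0 and math.isqrt(num) ** 2 == num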
| 96
| 1
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors, using numpy.
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors, without numpy.
    """
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """
        Benchmark the pure-Python and numpy implementations.
        """
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
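    # Quick agreement check (illustrative): both implementations give 5.0 for a 3-4-5 case.
    assert abs(euclidean_distance([0, 0], [3, 4]) - 5.0) < 1e-12
    assert abs(euclidean_distance_no_np([0, 0], [3, 4]) - 5.0) < 1e-12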
| 363
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq checkpoint's weights into our XLM-RoBERTa-XL structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        roberta_layer = roberta_sent_encoder.layers[i]

        attention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
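# Example invocation (illustrative):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint \
#       --pytorch_dump_folder_path ./xlm-roberta-xl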
| 106
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
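# Shape-normalization examples (illustrative): single frames, frame lists, and batches
# of videos all come out as List[List[image]]:
#   make_batched(img)              # -> [[img]]
#   make_batched([img, img])       # -> [[img, img]]
#   make_batched([[img], [img]])   # -> [[img], [img]]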
class VivitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a video image processor: resize, center crop, rescale (with an optional
    offset into a signed range), and normalize, applied frame by frame.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
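# --- Illustrative sketch (added; not part of the original processor) ---
# The "shortest_edge" branch of resize() above scales the short side of a frame
# to the target length while preserving aspect ratio. A minimal, self-contained
# version of that rule; compute_shortest_edge_size is a hypothetical helper,
# not a transformers API.
def compute_shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple:
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * shortest_edge / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)


assert compute_shortest_edge_size(480, 640, 256) == (256, 341)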
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
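# --- Illustrative sketch (added; not part of the original module) ---
# The _LazyModule registration above defers importing heavy submodules until an
# attribute is first accessed. A minimal stand-alone version of the same idea;
# lazy_attr is a hypothetical helper, not the transformers implementation.
import importlib


def lazy_attr(module_name: str, attribute: str):
    module = importlib.import_module(module_name)  # imported only when called
    return getattr(module, attribute)


# Example: resolves json.dumps at call time rather than at module load time.
# lazy_attr("json", "dumps")({"lazy": True})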
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    """simple docstring"""

    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['a', 'b', 'c'])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
            ], )
        output = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                ],
            ]
            * 5, )

    @require_tf
    def test_small_model_tf(self):
        '''simple docstring'''
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', framework='tf')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['a', 'b', 'c'])
        self.assertEqual(
            nested_simplify(output),
            [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}], )
        output = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                ],
            ]
            * 5, )

    @slow
    @require_torch
    def test_large_model_pt(self):
        '''simple docstring'''
        image_classifier = pipeline(
            task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(output),
            [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ], )
        output = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5, )

    @slow
    @require_tf
    def test_large_model_tf(self):
        '''simple docstring'''
        image_classifier = pipeline(
            task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', framework='tf')
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(output),
            [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ], )
        output = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5, )
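# --- Illustrative usage (added; not part of the original tests) ---
# What these tests exercise, as a plain sketch. Running it needs transformers,
# a backend (torch or tf) and network access to download the checkpoint:
#
# classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
# preds = classifier(image, candidate_labels=['cat', 'plane', 'remote'])
# # preds is a list of {'score': float, 'label': str} dicts, sorted by score.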
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """simple docstring"""
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith('deeplabv3_'):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = 'pascal-voc-id2label.json'
    else:
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name, base_model=False):
    """simple docstring"""
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace('conv_1.', 'conv_stem.')
    if ".block." in name:
        name = name.replace('.block.', '.')
    if "exp_1x1" in name:
        name = name.replace('exp_1x1', 'expand_1x1')
    if "red_1x1" in name:
        name = name.replace('red_1x1', 'reduce_1x1')
    if ".local_rep.conv_3x3." in name:
        name = name.replace('.local_rep.conv_3x3.', '.conv_kxk.')
    if ".local_rep.conv_1x1." in name:
        name = name.replace('.local_rep.conv_1x1.', '.conv_1x1.')
    if ".norm." in name:
        name = name.replace('.norm.', '.normalization.')
    if ".conv." in name:
        name = name.replace('.conv.', '.convolution.')
    if ".conv_proj." in name:
        name = name.replace('.conv_proj.', '.conv_projection.')
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace('expand_1x1', 'downsampling_layer.expand_1x1')
                if "conv_3x3" in name:
                    name = name.replace('conv_3x3', 'downsampling_layer.conv_3x3')
                if "reduce_1x1" in name:
                    name = name.replace('reduce_1x1', 'downsampling_layer.reduce_1x1')
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", '.layernorm.weight')
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", '.layernorm.bias')
    if ".global_rep." in name:
        name = name.replace('.global_rep.', '.transformer.')
    if ".pre_norm_mha.0." in name:
        name = name.replace('.pre_norm_mha.0.', '.layernorm_before.')
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('.pre_norm_mha.1.out_proj.', '.attention.output.dense.')
    if ".pre_norm_ffn.0." in name:
        name = name.replace('.pre_norm_ffn.0.', '.layernorm_after.')
    if ".pre_norm_ffn.1." in name:
        name = name.replace('.pre_norm_ffn.1.', '.intermediate.dense.')
    if ".pre_norm_ffn.4." in name:
        name = name.replace('.pre_norm_ffn.4.', '.output.dense.')
    if ".transformer." in name:
        name = name.replace('.transformer.', '.transformer.layer.')
    if ".aspp_layer." in name:
        name = name.replace('.aspp_layer.', '.')
    if ".aspp_pool." in name:
        name = name.replace('.aspp_pool.', '.')
    if "seg_head." in name:
        name = name.replace('seg_head.', 'segmentation_head.')
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('segmentation_head.classifier.classifier.', 'segmentation_head.classifier.')
    if "classifier.fc." in name:
        name = name.replace('classifier.fc.', 'classifier.')
    elif (not base_model) and ("segmentation_head." not in name):
        name = 'mobilevit.' + name
    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    """simple docstring"""
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevit.'
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val
    return orig_state_dict
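# --- Illustrative sketch (added; not part of the original script) ---
# The "qkv" branch above slices one fused projection matrix into equal query/
# key/value blocks along dim 0. The same slicing on a toy tensor; _demo_split_qkv
# is a hypothetical helper for illustration only.
def _demo_split_qkv(dim=4):
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v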
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_mobilevit_config(mobilevit_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    # load 🤗 model
    if mobilevit_name.startswith('deeplabv3_'):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits
    if mobilevit_name.startswith('deeplabv3_'):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ] )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            'mobilevit_s': 'mobilevit-small',
            'mobilevit_xs': 'mobilevit-x-small',
            'mobilevit_xxs': 'mobilevit-xx-small',
            'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
            'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
            'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
        }
        print('Pushing to the hub...')
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization='apple')
        model.push_to_hub(model_name, organization='apple')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """simple docstring"""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
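# --- Illustrative example (added): in a 3x3 grid with one wall in the middle
# (0 = free, 1 = blocked), there are exactly two simple paths from the top-left
# to the bottom-right corner:
# depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set())  ->  2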
if __name__ == "__main__":
import doctest
doctest.testmod()
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """simple docstring"""
    return len(unique_prime_factors(num))


def equality(lst: list) -> bool:
    """simple docstring"""
    return len(set(lst)) in (0, 1)


def run(n: int) -> list:
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """simple docstring"""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
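# --- Illustrative example (added): solution(2) looks for the first pair of
# consecutive integers with two distinct prime factors each, which is 14
# (14 = 2 * 7 and 15 = 3 * 5), so solution(2) -> 14.
# unique_prime_factors(644) -> {2, 7, 23}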
from __future__ import annotations
def all_unique(lst: list[int]) -> bool:
    return len(set(lst)) == len(lst)
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = 'contact@muhammadumerfarooq.me'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str):
        """simple docstring"""
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]):
        """simple docstring"""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split('.')[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(F'{len(emails)} emails found:')
print('\n'.join(sorted(emails)))
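# --- Illustrative example (added): the pattern built above, on a small string.
# re.findall('[a-zA-Z0-9]+@' + 'github.com', 'mail admin@github.com or x@y.z')
# -> ['admin@github.com']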
'''simple docstring'''
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal):
    """simple docstring"""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
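# --- Illustrative examples (added):
# decimal_to_hexadecimal(5973)  ->  '0x1755'
# decimal_to_hexadecimal(-256)  ->  '-0x100'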
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def solution(limit=50_000_000):
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f'''{solution() = }''')
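# --- Illustrative example (added): below fifty exactly four numbers are
# expressible as a prime square plus a prime cube plus a prime fourth power
# (28, 33, 47 and 49), so solution(50) -> 4.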
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__UpperCamelCase : Optional[int] = F'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    'E': 12.70,
    'T': 9.06,
    'A': 8.17,
    'O': 7.51,
    'I': 6.97,
    'N': 6.75,
    'S': 6.33,
    'H': 6.09,
    'R': 5.99,
    'D': 4.25,
    'L': 4.03,
    'C': 2.78,
    'U': 2.76,
    'M': 2.41,
    'W': 2.36,
    'F': 2.23,
    'G': 2.02,
    'Y': 1.97,
    'P': 1.93,
    'B': 1.29,
    'V': 0.98,
    'K': 0.77,
    'J': 0.15,
    'X': 0.15,
    'Q': 0.10,
    'Z': 0.07,
}
ETAOIN = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count(message: str) -> dict:
    """simple docstring"""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    """simple docstring"""
    return x[0]


def get_frequency_order(message: str) -> str:
    """simple docstring"""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = """""".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """simple docstring"""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
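# --- Illustrative example (added):
# get_letter_count('Abc aB') counts case-insensitively over A-Z, giving
# {'A': 2, 'B': 2, 'C': 1, ...} with every other letter at 0.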
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    """simple docstring"""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('/')
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"""Skipping non-model layer {full_name}""")
            continue
        if "optimizer" in full_name:
            logger.info(f"""Skipping optimization layer {full_name}""")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('layer_with_weights'):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append('/'.join(name))
        arrays.append(array)
    logger.info(f"""Read a total of {len(arrays):,} layers""")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"""Found layer names with different depths (layer depth {list(set(layer_depth))})""")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            '''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
            ''' heads.''')
    # convert layers
    logger.info('''Converting weights...''')
    for full_name, array in zip(names, arrays):
        name = full_name.split('/')
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights'):
                layer_num = int(m_name.split('-')[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'])
                    pointer = getattr(pointer, 'embeddings')
                    pointer = getattr(pointer, 'LayerNorm')
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4)])
                    pointer = getattr(pointer, 'encoder')
                    pointer = getattr(pointer, 'layer')
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'])
                    pointer = getattr(pointer, 'pooler')
                    pointer = getattr(pointer, 'dense')
            elif m_name == "embeddings":
                trace.append('embeddings')
                pointer = getattr(pointer, 'embeddings')
                if layer_num == 0:
                    trace.append('word_embeddings')
                    pointer = getattr(pointer, 'word_embeddings')
                elif layer_num == 1:
                    trace.append('position_embeddings')
                    pointer = getattr(pointer, 'position_embeddings')
                elif layer_num == 2:
                    trace.append('token_type_embeddings')
                    pointer = getattr(pointer, 'token_type_embeddings')
                else:
                    raise ValueError(f"""Unknown embedding layer with name {full_name}""")
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'self')
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(['output', 'LayerNorm'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_key_dense":
                # attention key
                trace.append('key')
                pointer = getattr(pointer, 'key')
            elif m_name == "_query_dense":
                # attention query
                trace.append('query')
                pointer = getattr(pointer, 'query')
            elif m_name == "_value_dense":
                # attention value
                trace.append('value')
                pointer = getattr(pointer, 'value')
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'])
                pointer = getattr(pointer, 'intermediate')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append('output')
                pointer = getattr(pointer, 'output')
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias')
                pointer = getattr(pointer, 'bias')
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            else:
                logger.warning(f"""Ignored {m_name}""")
        # for certain layers reshape is necessary
        trace = '.'.join(trace)
        if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''', trace) or re.match(
            R'''(\S+)\.attention\.output\.dense\.weight''', trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                f""" {array.shape}""")
        logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""")
    return model
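# --- Illustrative sketch (added; not part of the original script) ---
# The conversion above reaches each PyTorch parameter by chaining getattr calls
# along the traced path. Minimal form of that traversal; resolve_attr is a
# hypothetical helper, not part of transformers.
def resolve_attr(root, dotted_path):
    for part in dotted_path.split('.'):
        root = getattr(root, part)
    return root


# e.g. resolve_attr(model, 'encoder.layer.0.attention.self.query.weight')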
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """simple docstring"""
    logger.info(f"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer) -> None:
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2_048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            if "attention_mask" in text_encoding:
                text_encoding["""decoder_attention_mask"""] = text_encoding.pop("""attention_mask""")
            if "input_ids" in text_encoding:
                text_encoding["""decoder_input_ids"""] = text_encoding.pop("""input_ids""")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs) -> str:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs) -> Any:
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self) -> List[str]:
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
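# --- Illustrative usage (added; not part of the original file) ---
# Typical call pattern for the processor above; the checkpoint name is an
# assumption for illustration:
# processor = Pix2StructProcessor.from_pretrained('google/pix2struct-textcaps-base')
# inputs = processor(images=image, text='A picture of', return_tensors='pt')
# # inputs carries flattened image patches plus the tokenized text (renamed to
# # decoder_input_ids / decoder_attention_mask above).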
'''simple docstring'''
def partition(m: int) -> int:
    """simple docstring"""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    '''simple docstring'''
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
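# --- Illustrative example (added): the 2nd smallest element of [3, 1, 2] is 2,
# so kth_number([3, 1, 2], 2) -> 2 regardless of which pivot is drawn.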
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50_257, num_wordpiece_labels=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    '''simple docstring'''

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''')
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
if is_torch_fx_proxy(__lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(__lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(__lowerCamelCase , np.ndarray )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
return isinstance(__lowerCamelCase , np.ndarray )
def snake_case__ ( __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return _is_numpy(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
import torch
return isinstance(__lowerCamelCase , torch.Tensor )
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : List[str] ):
"""simple docstring"""
import torch
return isinstance(__lowerCamelCase , torch.device )
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
import torch
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if hasattr(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : Tuple =getattr(__lowerCamelCase , __lowerCamelCase )
else:
return False
return isinstance(__lowerCamelCase , torch.dtype )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
import tensorflow as tf
return isinstance(__lowerCamelCase , tf.Tensor )
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__lowerCamelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(__lowerCamelCase )
return type(__lowerCamelCase ) == tf.Tensor
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(__lowerCamelCase , jnp.ndarray )
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(__lowerCamelCase )
def to_py_obj ( obj ):
    """simple docstring"""
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy ( obj ):
    """simple docstring"""
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
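# A minimal sketch of the recursive conversion pattern implemented by
# `to_py_obj` / `to_numpy` above: nested dicts/lists are walked element-wise,
# framework tensors collapse to lists or ndarrays, and plain values pass
# through unchanged. Hypothetical calls:
#
#   to_py_obj({"a": np.arange(3), "b": (np.float32(1.5),)})   # -> {"a": [0, 1, 2], "b": [1.5]}
#   to_numpy([[1, 2], [3, 4]])                                # -> array([[1, 2], [3, 4]])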
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
        first_field = getattr(self, class_fields[0].name )
        other_fields_are_none = all(getattr(self, field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field, dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element, (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0], str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self, element[0], element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self, *args, **kwargs ):
        raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
    def setdefault( self, *args, **kwargs ):
        raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
    def pop( self, *args, **kwargs ):
        raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
    def update( self, *args, **kwargs ):
        raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
    def __getitem__( self, k ):
        if isinstance(k, str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self, name, value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value )
        super().__setattr__(name, value )
    def __setitem__( self, key, value ):
        # Will raise a KeyException if needed
        super().__setitem__(key, value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value )
    def to_tuple( self ):
        return tuple(self[k] for k in self.keys() )
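# Behavior sketch for the dict/tuple hybrid above (hypothetical subclass and
# field names; the base class is assumed to be an OrderedDict-backed ModelOutput):
#
#   @dataclass
#   class SampleOutput(ModelOutput):
#       logits: Any = None
#       hidden_states: Any = None
#
#   out = SampleOutput(logits=[1.0])
#   out["logits"], out.logits, out[0]   # key, attribute and integer access all agree
#   out.to_tuple()                      # -> ([1.0],); unset (None) fields never appear as keys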
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@classmethod
    def _missing_( cls , value ):
        raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'longest'
_a = 'max_length'
_a = 'do_not_pad'
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'pt'
_a = 'tf'
_a = 'np'
_a = 'jax'
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self , context_managers : List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def can_return_loss ( model_class ):
    """simple docstring"""
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call ) # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward ) # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ ) # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels ( model_class ):
    """simple docstring"""
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call ) # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward ) # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ ) # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict ( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    """simple docstring"""
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
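# A quick shape check for the flattener above (default "." delimiter):
#
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   # -> {"a": 1, "b.c": 2, "b.d.e": 3}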
@contextmanager
def working_or_temp_dir ( working_dir , use_temp_dir : bool = False ):
    """simple docstring"""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
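# Usage sketch for the context manager above: the caller gets either the
# directory it passed in or a throwaway temporary one, and writes the same
# code on both paths (`save_everything` is a hypothetical callee):
#
#   with working_or_temp_dir("./checkpoints", use_temp_dir=True) as d:
#       save_everything(d)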
def transpose ( array , axes=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array )}.''' )
def reshape ( array , newshape ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array )}.''' )
def squeeze ( array , axis=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array )}.''' )
def expand_dims ( array , axis ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array )}.''' )
def tensor_size ( array ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(array )}.''' )
def add_model_info_to_auto_map ( auto_map , repo_id ):
    """simple docstring"""
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''
    return auto_map
def infer_framework ( model_class ):
    """simple docstring"""
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'''Could not infer framework from class {model_class}.''' )
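# Illustration of the MRO walk above (hypothetical class): for
#
#   class MyModel(torch.nn.Module): ...
#
# some base class lives in the `torch` module, so the helper returns "pt";
# Keras subclasses resolve to "tf" and flax/jax subclasses to "flax".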
| 238
|
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors ( n : int ):
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len ( n : int ):
    """simple docstring"""
    return len(unique_prime_factors(n ) )
def equality ( lst : list ):
    """simple docstring"""
    return len(set(lst ) ) in (0, 1)
def run ( n : int ):
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution ( n : int = 4 ):
    """simple docstring"""
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
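# Small cases from the Project Euler 47 statement, usable as a sanity check:
# solution(2) -> 14 (14 = 2*7 and 15 = 3*5 each have two distinct prime
# factors) and solution(3) -> 644 (644, 645, 646 each have three).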
| 238
| 1
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , _A , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = 99
SCREAMING_SNAKE_CASE_ = 32
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 37
SCREAMING_SNAKE_CASE_ = '''gelu'''
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = 512
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 0.02
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = None
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> int:
SCREAMING_SNAKE_CASE_ = TFDistilBertModel(config=_A )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ = model(_A )
SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = TFDistilBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> str:
SCREAMING_SNAKE_CASE_ = TFDistilBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = TFDistilBertForSequenceClassification(_A )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.num_choices
SCREAMING_SNAKE_CASE_ = TFDistilBertForMultipleChoice(_A )
SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = TFDistilBertForTokenClassification(_A )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =(
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCAmelCase_ =(
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ =False
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = TFDistilBertModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , dim=37 )
def _UpperCamelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_A )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_A )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_A )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_A )
@slow
def _UpperCamelCase ( self ) -> List[str]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
SCREAMING_SNAKE_CASE_ = TFDistilBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _UpperCamelCase ( self ) -> List[str]:
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 257
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase ( self ) -> List[str]:
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 257
| 1
|
from math import isqrt
def is_prime ( number ) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution ( max_prime = 10**6 ) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 230
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm ( main_process_only = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
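# Usage sketch (this wrapper mirrors accelerate's `tqdm`): in a multi-process
# run only local rank 0 draws the progress bar; every other rank constructs
# the bar with `disable=True`. Hypothetical training loop:
#
#   for batch in tqdm(dataloader, desc="train"):
#       step(batch)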
| 121
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCAmelCase__ : Any =None
lowerCAmelCase__ : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Tuple ={'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ : Any ={
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ : Union[str, Any] ={
'google/rembert': 2_56,
}
lowerCAmelCase__ : Optional[Any] ='▁'
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
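# Token layout produced by the three helpers above for a RemBERT-style pair
# of sequences, e.g. token_ids_0=[10, 11] and token_ids_1=[20]:
#
#   build_inputs_with_special_tokens      -> [CLS] 10 11 [SEP] 20 [SEP]
#   get_special_tokens_mask               -> [1, 0, 0, 1, 0, 1]
#   create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 1, 1]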
| 162
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowercase (unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 1_8}
        crop_size = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = LevitImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_std' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_center_crop' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : str = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 162
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class a ( _lowerCamelCase ):
def A_ ( self : Union[str, Any] ):
snake_case_ = tempfile.mkdtemp()
snake_case_ = 8
# DPR tok
snake_case_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
snake_case_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
snake_case_ = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
snake_case_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case_ = {'''unk_token''': '''<unk>'''}
snake_case_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase_ ) )
def A_ ( self : Any ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def A_ ( self : Dict ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def A_ ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def A_ ( self : Union[str, Any] ):
snake_case_ = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
snake_case_ = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
snake_case_ = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowercase_ )
rag_tokenizer.save_pretrained(lowercase_ )
snake_case_ = RagTokenizer.from_pretrained(lowercase_ , config=lowercase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowercase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , lowercase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def A_ ( self : List[Any] ):
snake_case_ = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
snake_case_ = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
snake_case_ = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
def A_ ( self : Dict ):
snake_case_ = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
snake_case_ = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
snake_case_ = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 56
|
"""simple docstring"""
def __magic_name__ ( a , b ):
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("""1""" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
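# Worked example for the string-based OR above: 25 -> "11001" and
# 32 -> "100000"; zero-filled to width 6 they are "011001" and "100000",
# and the column-wise any-"1" test yields "0b111001" (== bin(25 | 32)).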
| 173
| 0
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    def _get_tensors( self , length ):
        """simple docstring"""
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
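# The three criteria exercised above stop generation on different budgets:
#   MaxLengthCriteria(10)                                    fires once input_ids has 10 columns,
#   MaxNewTokensCriteria(start_length=5, max_new_tokens=5)   fires at the same total length 10,
#   MaxTimeCriteria(max_time=0.1)                            fires 0.1 s after its construction
#                                                            (or after `initial_timestamp`).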
| 248
|
"""simple docstring"""
def _lowerCAmelCase ( txt ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
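# Example for the routine above: each alphabetic position is upper-cased once,
# so "ab1" -> ["Ab1", "aB1"]; the digit position is skipped by the isalpha()
# guard.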
| 248
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str]=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ""
else:
__lowerCAmelCase = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def _a ( SCREAMING_SNAKE_CASE_ : str ):
__lowerCAmelCase = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
__lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = val
def _a ( ):
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int=True ):
__lowerCAmelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
__lowerCAmelCase = 8
# set labels if required
if not base_model:
__lowerCAmelCase = 10_00
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
        __lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__lowerCAmelCase = 3_84
__lowerCAmelCase = 15_36
__lowerCAmelCase = 12
__lowerCAmelCase = 6
# load original model from torch hub
__lowerCAmelCase = torch.hub.load("facebookresearch/dino:main" , SCREAMING_SNAKE_CASE_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = original_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
if base_model:
__lowerCAmelCase = ViTModel(SCREAMING_SNAKE_CASE_ , add_pooling_layer=SCREAMING_SNAKE_CASE_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by ViTImageProcessor
__lowerCAmelCase = ViTImageProcessor()
__lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowerCAmelCase = encoding["pixel_values"]
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )
if base_model:
__lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
__lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
UpperCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
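# In miniature, the q/k/v split performed by `read_in_q_k_v` above: timm
# stores the attention input projection as one fused (3*hidden, hidden)
# matrix, and rows [:h], [h:2h] and [-h:] are the query, key and value
# weights in that order (biases are sliced the same way).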
| 92
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort ( a , start , end ):
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition ( a , start , end ):
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]: # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
UpperCamelCase__ = TemporaryFile()
UpperCamelCase__ = 100 # 1000 elements are to be sorted
UpperCamelCase__ , UpperCamelCase__ = 0, 1 # mean and standard deviation
UpperCamelCase__ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
UpperCamelCase__ = np.load(outfile)
UpperCamelCase__ = len(M) - 1
UpperCamelCase__ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
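# For a uniformly random pivot the expected comparison count is O(n log n)
# (roughly 2 * n * ln(n), i.e. on the order of 900 for the 100 elements
# sorted above), while the worst case remains O(n**2).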
| 92
| 1
|
"""simple docstring"""
from collections import deque
def tarjan ( g ):
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index # the number when this node is seen
        lowlink_of[v] = index # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph ( n , edges ):
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
SCREAMING_SNAKE_CASE_ : str = 7
SCREAMING_SNAKE_CASE_ : List[Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
SCREAMING_SNAKE_CASE_ : Any = [(u, v) for u, v in zip(source, target)]
SCREAMING_SNAKE_CASE_ : List[Any] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
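# Reading the assertion above: 0 -> 1 -> 2 -> 0 plus 0 -> 3 -> 1 put
# {0, 1, 2, 3} in one strongly connected component, while 4, 5 and 6 sit on
# one-way edges and form singleton SCCs; Tarjan emits the components in
# reverse topological order, hence [[5], [6], [4], [3, 2, 1, 0]].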
| 69
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 69
| 1
|
from __future__ import annotations
def __lowercase ( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 1_23 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'a': 0.08497,
'b': 0.01492,
'c': 0.02202,
'd': 0.04253,
'e': 0.11162,
'f': 0.02228,
'g': 0.02015,
'h': 0.06094,
'i': 0.07546,
'j': 0.00153,
'k': 0.01292,
'l': 0.04025,
'm': 0.02406,
'n': 0.06749,
'o': 0.07507,
'p': 0.01929,
'q': 0.00095,
'r': 0.07587,
's': 0.06327,
't': 0.09356,
'u': 0.02758,
'v': 0.00978,
'w': 0.02560,
'x': 0.00150,
'y': 0.01994,
'z': 0.00077,
}
else:
# Custom frequencies dictionary
__SCREAMING_SNAKE_CASE = frequencies_dict
if not case_sensitive:
__SCREAMING_SNAKE_CASE = ciphertext.lower()
# Chi squared statistic values
__SCREAMING_SNAKE_CASE = {}
# cycle through all of the shifts
for shift in range(len(a__ ) ):
__SCREAMING_SNAKE_CASE = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
__SCREAMING_SNAKE_CASE = (alphabet_letters.index(letter.lower() ) - shift) % len(
a__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__SCREAMING_SNAKE_CASE = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__SCREAMING_SNAKE_CASE = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
__SCREAMING_SNAKE_CASE = decrypted_with_shift.lower().count(a__ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__SCREAMING_SNAKE_CASE = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__SCREAMING_SNAKE_CASE = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
__SCREAMING_SNAKE_CASE = decrypted_with_shift.count(a__ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__SCREAMING_SNAKE_CASE = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__SCREAMING_SNAKE_CASE = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__SCREAMING_SNAKE_CASE = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(a__ ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__SCREAMING_SNAKE_CASE = min(
a__ , key=a__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
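# Example usage (a sketch; chi-squared values depend on the frequency table
# above, so none are asserted here, and very short inputs can fool the
# statistic):
#
#     shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("uryyb jbeyq")
#     # "uryyb jbeyq" is ROT13 input, so this is expected to recover "hello world"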
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Very similar to PipelineIterator in its unpacking mechanism, but each
        # returned item is the accumulated list of sub-items up to and
        # including the one flagged `is_last`.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
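    # With inputs (1, 1, 1) the adder computes 1 + 1 + 1 = 3 = 0b11, so the
    # two measured bits (sum and carry-out) should read '11' on all 1000
    # shots, i.e. a counts dict like {'11': 1000}: the circuit is
    # deterministic for basis-state inputs, and only entries equal to 2
    # introduce superposition via the Hadamard gate.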
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Optional[Any]=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Tuple=4 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : int = is_training
_UpperCAmelCase : Dict = use_attention_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = num_choices
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_attention_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = config_and_inputs
_UpperCAmelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class_name.from_pretrained("albert-base-v2" )
_UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_UpperCAmelCase : List[Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_UpperCAmelCase : List[Any] = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
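# To run just this file in a transformers source checkout (a sketch; the
# path assumes the usual tests layout, adjust if your checkout differs):
#     python -m pytest tests/models/albert/test_modeling_flax_albert.py -v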
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase = logging.getLogger(__name__)
class A__ ( _snake_case ):
lowercase = "summarization"
lowercase = ["loss"]
lowercase = ROUGE_KEYS
lowercase = "rouge2"
def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
A_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
A_ = Path(self.output_dir ) / """metrics.json"""
A_ = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
A_ = 0
A_ = defaultdict(UpperCamelCase__ )
A_ = self.config.model_type
A_ = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
A_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
A_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ = get_git_info()["""repo_sha"""]
A_ = hparams.num_workers
A_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ):
A_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
A_ = False
A_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ = self.hparams.eval_max_gen_length
else:
A_ = self.model.config.max_length
A_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.model(UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = self.tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return lmap(str.strip , UpperCamelCase__ )
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
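    # A sketch of what label_smoothed_nll_loss computes (the exact helper
    # lives in utils.py): with smoothing epsilon, the target distribution
    # keeps 1 - epsilon on the gold token and spreads epsilon uniformly over
    # the vocabulary, roughly
    #     loss = (1 - epsilon) * nll_loss + (epsilon / vocab_size) * smooth_loss
    # which penalizes over-confident logits compared to plain cross-entropy.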
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
# tokens per batch
A_ = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
A_ = batch["""input_ids"""].shape[0]
A_ = batch["""input_ids"""].eq(self.pad ).sum()
A_ = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
A_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ = losses["""loss"""]
A_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
A_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase__ )
A_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ = self.step_count
self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path
A_ = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> dict:
'''simple docstring'''
A_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ = (time.time() - ta) / batch["""input_ids"""].shape[0]
A_ = self.ids_to_clean_text(UpperCamelCase__ )
A_ = self.ids_to_clean_text(batch["""labels"""] )
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
A_ = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
A_ = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
return base_metrics
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.validation_epoch_end(UpperCamelCase__ , prefix="""test""" )
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs
        )
        return dataset
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataset(UpperCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
return dataloader
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
add_generic_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"""--max_source_length""" , default=1024 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=UpperCamelCase__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=UpperCamelCase__ )
parser.add_argument("""--max_tokens_per_batch""" , type=UpperCamelCase__ , default=UpperCamelCase__ )
parser.add_argument("""--logger_name""" , type=UpperCamelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
        parser.add_argument(
            """--task""" , type=UpperCamelCase__ , default="""summarization""" , required=UpperCamelCase__ , help="""Task to fine-tune on: summarization or translation.""" )
parser.add_argument("""--label_smoothing""" , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__ )
parser.add_argument("""--src_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ )
parser.add_argument("""--tgt_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ )
parser.add_argument("""--eval_beams""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ )
parser.add_argument(
"""--val_metric""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class A__ ( _snake_case ):
lowercase = "translation"
lowercase = ["loss"]
lowercase = ["bleu"]
lowercase = "bleu"
def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
super().__init__(UpperCamelCase__ , **UpperCamelCase__ )
A_ = hparams.src_lang
A_ = hparams.tgt_lang
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> dict:
'''simple docstring'''
return calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''')

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]

    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
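    # Example invocation (a sketch; the paths and model name are
    # placeholders, and the available flags come from add_generic_args /
    # add_model_specific_args above):
    #
    #   python finetune.py --data_dir ./cnn_dm --output_dir ./runs/summ \
    #       --model_name_or_path t5-small --task summarization --gpus 1 --do_train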
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
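# For example (a sketch), with pad_token_id = 0 and
#     input_ids = torch.tensor([[5, 6, 0, 0],
#                               [7, 0, 0, 0]])
# the last two columns contain only padding in every row, so trim_batch
# drops them and returns tensor([[5, 6], [7, 0]]).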
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="train" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="" , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = Path(UpperCamelCase__ ).joinpath(type_path + """.source""" )
A_ = Path(UpperCamelCase__ ).joinpath(type_path + """.target""" )
A_ = self.get_char_lens(self.src_file )
A_ = max_source_length
A_ = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
A_ = tokenizer
A_ = prefix
if n_obs is not None:
A_ = self.src_lens[:n_obs]
A_ = src_lang
A_ = tgt_lang
def __len__( self ) -> Dict:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , UpperCamelCase__ ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A_ = index + 1 # linecache starts at 1
A_ = self.prefix + linecache.getline(str(self.src_file ) , UpperCamelCase__ ).rstrip("""\n""" )
A_ = linecache.getline(str(self.tgt_file ) , UpperCamelCase__ ).rstrip("""\n""" )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , UpperCamelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
)
A_ = self.tokenizer.generator if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
A_ = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_source_length , """right""" )
A_ = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_target_length , """right""" )
A_ = source_inputs["""input_ids"""].squeeze()
A_ = target_inputs["""input_ids"""].squeeze()
A_ = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
return [len(UpperCamelCase__ ) for x in Path(UpperCamelCase__ ).open().readlines()]
def snake_case_ ( self , UpperCamelCase__ ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A_ = torch.stack([x["""input_ids"""] for x in batch] )
A_ = torch.stack([x["""attention_mask"""] for x in batch] )
A_ = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase__ )
else self.tokenizer.pad_token_id
)
A_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase__ )
else self.tokenizer.pad_token_id
)
A_ = trim_batch(UpperCamelCase__ , UpperCamelCase__ )
A_ , A_ = trim_batch(UpperCamelCase__ , UpperCamelCase__ , attention_mask=UpperCamelCase__ )
A_ = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
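# Worked example: prediction "cat sat" vs ground truth "the cat sat down"
# normalizes to "cat sat" and "cat sat down" (articles are stripped), so
# num_same = 2, precision = 2/2 = 1.0, recall = 2/3, and
# f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.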
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_megatron_bert'''] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
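    # The effect (a sketch): attribute access on this module is deferred, so
    #     from transformers.models.megatron_bert import MegatronBertConfig
    # stays cheap, while the first access to a modeling class triggers the
    # torch-dependent import declared in _import_structure above.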
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _A ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING
_SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING
def __A ( self ) -> Any:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""},
] , )
__UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
__UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
__UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
__UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
@require_torch
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(__UpperCAmelCase )
@slow
@require_tf
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(__UpperCAmelCase )
def __A ( self , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : int = None
self.run_pipeline_test(__UpperCAmelCase , [] )
@require_tf
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : str = None
self.run_pipeline_test(__UpperCAmelCase , [] )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
__UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : int = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = fill_masker.tokenizer
__UpperCAmelCase : Union[str, Any] = fill_masker.model
__UpperCAmelCase : Tuple = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
__UpperCAmelCase , [
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
] , )
with self.assertRaises(__UpperCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__UpperCAmelCase ):
fill_masker("""This is""" )
self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase )
self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase )
self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase )
self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase )
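# The helpers below exercise `targets`, `top_k`, their combination, duplicate
# targets, and multi-mask inputs, each against the same (model, tokenizer) pair.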
def run_test_targets( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Dict = tokenizer.get_vocab()
__UpperCAmelCase : Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
__UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase )
__UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : Any = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase )
__UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) )
# Call argument
__UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : List[Any] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase )
__UpperCAmelCase : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) )
# Score equivalence
__UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase )
__UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs]
__UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase ) == set(__UpperCAmelCase ):
__UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase )
__UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(__UpperCAmelCase ):
__UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__UpperCAmelCase ):
__UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] )
with self.assertRaises(__UpperCAmelCase ):
__UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" )
def run_test_top_k( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 )
__UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
def run_test_top_k_targets( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : int = tokenizer.get_vocab()
__UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
# top_k=2, ntargets=3
__UpperCAmelCase : Dict = sorted(vocab.keys() )[:3]
__UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase )
# If we use the most probable targets and filter differently, we should still
# have the same results
__UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ):
__UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
def fill_mask_with_duplicate_targets_and_top_k( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
__UpperCAmelCase : Dict = sorted(vocab.keys() )[:3]
__UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__UpperCAmelCase ) , 3 )
def fill_mask_with_multiple_masks( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : Dict = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
] , )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
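# Full determinism keeps the pixel-slice assertions below reproducible across runs.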
class A__(unittest.TestCase ):
"""simple docstring"""
_A : List[str] = StableDiffusionLDMaDPipeline
_A : int = TEXT_TO_IMAGE_PARAMS
_A : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_A : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ) -> Union[str, Any]:
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ) -> Any:
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> List[Any]:
device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
ldmad_pipe = ldmad_pipe.to(device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
output = ldmad_pipe(**inputs )
rgb, depth = output.rgb, output.depth
image_slice_rgb = rgb[0, -3:, -3:, -1]
image_slice_depth = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
expected_slice_rgb = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
expected_slice_depth = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def UpperCamelCase__ ( self ) -> Optional[Any]:
components = self.get_dummy_components()
ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
ldmad_pipe = ldmad_pipe.to(torch_device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(torch_device )
inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
# forward
output = ldmad_pipe(**inputs )
rgb_slice_a, depth_slice_a = output.rgb, output.depth
rgb_slice_a = rgb_slice_a[0, -3:, -3:, -1]
depth_slice_a = depth_slice_a[0, -3:, -1]
inputs = self.get_dummy_inputs(torch_device )
prompt = 3 * [inputs.pop("""prompt""" )]
text_inputs = ldmad_pipe.tokenizer(
prompt , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
text_inputs = text_inputs["""input_ids"""].to(torch_device )
prompt_embeds = ldmad_pipe.text_encoder(text_inputs )[0]
inputs["""prompt_embeds"""] = prompt_embeds
# forward
output = ldmad_pipe(**inputs )
rgb_slice_b, depth_slice_b = output.rgb, output.depth
rgb_slice_b = rgb_slice_b[0, -3:, -3:, -1]
depth_slice_b = depth_slice_b[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
def UpperCamelCase__ ( self ) -> Dict:
device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
ldmad_pipe = ldmad_pipe.to(device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
negative_prompt = """french fries"""
output = ldmad_pipe(**inputs , negative_prompt=negative_prompt )
rgb, depth = output.rgb, output.depth
rgb_slice = rgb[0, -3:, -3:, -1]
depth_slice = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
expected_slice_rgb = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
expected_slice_depth = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def tearDown( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ) -> List[str]:
generator = torch.Generator(device=generator_device ).manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
inputs = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Any:
ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
ldmad_pipe = ldmad_pipe.to(torch_device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
output = ldmad_pipe(**inputs )
rgb, depth = output.rgb, output.depth
rgb_slice = rgb[0, -3:, -3:, -1].flatten()
depth_slice = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
expected_slice_rgb = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
expected_slice_depth = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def tearDown( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ) -> str:
generator = torch.Generator(device=generator_device ).manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
inputs = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(torch_device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
output = ldmad_pipe(**inputs )
rgb, depth = output.rgb, output.depth
expected_rgb_mean = 0.4_9_5_5_8_6
expected_rgb_std = 0.3_3_7_9_5_5_1_5
expected_depth_mean = 1_1_2.4_8_5_1_8
expected_depth_std = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCamelCase__ ( self ) -> Optional[int]:
ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(torch_device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
output = ldmad_pipe(**inputs )
rgb, depth = output.rgb, output.depth
expected_rgb_mean = 0.4_1_9_4_1_2_7
expected_rgb_std = 0.3_5_3_7_5_5_8_6
expected_depth_mean = 0.5_6_3_8_5_0_2
expected_depth_std = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def _UpperCAmelCase ( a__):
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory)
class A__(a_ ):
"""simple docstring"""
@staticmethod
def register_subcommand( parser ) -> Dict:
train_parser = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=_lowercase , required=_lowercase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=_lowercase , required=_lowercase , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=_lowercase )
def __init__( self , tfds_path , datasets_directory , *args ) -> str:
self._logger = get_logger("""datasets-cli/converting""" )
self._tfds_path = tfds_path
self._datasets_directory = datasets_directory
def run( self ) -> Dict:
if os.path.isdir(self._tfds_path ):
abs_tfds_path = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
abs_tfds_path = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
abs_datasets_path = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
utils_files = []
with_manual_update = []
imports_to_builder_map = {}
if os.path.isdir(self._tfds_path ):
file_names = os.listdir(abs_tfds_path )
else:
file_names = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
input_file = os.path.join(abs_tfds_path , f_name )
output_file = os.path.join(abs_datasets_path , f_name )
if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(input_file , encoding="""utf-8""" ) as f:
lines = f.readlines()
out_lines = []
is_builder = False
needs_manual_update = False
tfds_imports = []
for line in lines:
out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
out_line = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
out_line = """"""
continue
elif "from absl import logging" in out_line:
out_line = """from datasets import logging\n"""
elif "getLogger" in out_line:
out_line = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
needs_manual_update = True
filters = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(filters ) + """\n""" )
out_lines.append(out_line )
out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
out_line = re.sub(pattern , replacement , out_line )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
match = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , out_line )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
out_line = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
is_builder = True
out_lines.append(out_line )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
dataset_name = f_name.replace(""".py""" , """""" )
output_dir = os.path.join(abs_datasets_path , dataset_name )
output_file = os.path.join(output_dir , f_name )
os.makedirs(output_dir , exist_ok=True )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowercase )
if needs_manual_update:
with_manual_update.append(_lowercase )
with open(output_file , """w""" , encoding="""utf-8""" ) as f:
f.writelines(out_lines )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
f_name = os.path.basename(utils_file )
dest_folder = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(utils_file , dest_folder )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
__snake_case = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
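# Note: the second-to-last path segment of each checkpoint URL is its SHA256
# checksum; `_download` below relies on this to verify the downloaded file.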
def remove_ignore_keys_(state_dict ) -> None:
    """simple docstring"""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )
__snake_case = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
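# Illustrative rename under the mapping above (hypothetical key):
# "decoder.blocks.0.attn.query.weight" -> "decoder.layers.0.self_attn.q_proj.weight"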
def rename_keys(s_dict ) -> Union[str, Any]:
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb ) -> Union[str, Any]:
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
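# The linear layer created above shares storage with the embedding weights, i.e.
# the LM head is weight-tied to the token embeddings.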
def _download(url: str , root: str ) -> bytes:
    """simple docstring"""
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F'{download_target} exists and is not a regular file' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ) -> Tuple:
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
__snake_case = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
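# Example invocation (script name and paths are placeholders, not from the original file):
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny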
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__snake_case = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
__snake_case = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
__snake_case = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
__snake_case = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case = [0] * args.vocab_size
for k, v in counter.items():
__snake_case = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
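# `counts[token_id]` now holds each vocabulary id's corpus frequency, which
# downstream training uses to smooth MLM masking probabilities (per the parser
# description above).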
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__( self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self) -> int:
        return sum(1 for _ in self)
    def __str__( self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists( sll_one: SortedLinkedList, sll_two: SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
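# Minimal usage sketch: SortedLinkedList([3, 1, 2]) iterates in ascending order and
# prints as "1 -> 2 -> 3"; merge_lists keeps the merged result sorted the same way.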
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = (DPMSolverSinglestepScheduler,)
SCREAMING_SNAKE_CASE_ = (("num_inference_steps", 2_5),)
def a_ ( self, **lowerCAmelCase__) -> int:
config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf'),
'variance_type': None,
}
config.update(**lowerCAmelCase__)
return config
def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> List[Any]:
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop('num_inference_steps', None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**kwargs)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__)
snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__)
new_scheduler.set_timesteps(lowerCAmelCase__)
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ , snake_case_ = sample, sample
for t in range(lowerCAmelCase__, time_step + scheduler.config.solver_order + 1):
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def a_ ( self) -> Union[str, Any]:
pass
def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> int:
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop('num_inference_steps', None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__)
snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase__)
# copy over dummy past residual (must be after setting timesteps)
snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop( self, scheduler=None, **config) -> Union[str, Any]:
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def a_ ( self) -> List[Any]:
scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
num_inference_steps = 50
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.2574) < 1e-3
def a_ ( self) -> Dict:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2791) < 1e-3
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2791) < 1e-3
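# Round-tripping the config through DEIS, DPM-multistep, and UniPC and back to the
# singlestep scheduler must leave the sampling result unchanged, since these
# schedulers share a compatible config schema.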
def a_ ( self) -> str:
self.check_over_configs(thresholding=lowerCAmelCase__)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase__, prediction_type=lowerCAmelCase__, sample_max_value=lowerCAmelCase__, algorithm_type='dpmsolver++', solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, )
def a_ ( self) -> Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, algorithm_type=lowerCAmelCase__, )
snake_case_ = self.full_loop(
solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, algorithm_type=lowerCAmelCase__, )
assert not torch.isnan(lowerCAmelCase__).any(), "Samples have nan numbers"
def a_ ( self) -> Optional[Any]:
self.check_over_configs(lower_order_final=lowerCAmelCase__)
self.check_over_configs(lower_order_final=lowerCAmelCase__)
def a_ ( self) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf'))
self.check_over_configs(lambda_min_clipped=-5.1)
def a_ ( self) -> Any:
self.check_over_configs(variance_type=lowerCAmelCase__)
self.check_over_configs(variance_type='learned_range')
def a_ ( self) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCAmelCase__, time_step=0)
def a_ ( self) -> int:
snake_case_ = self.full_loop()
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.2791) < 1e-3
def a_ ( self) -> Dict:
snake_case_ = self.full_loop(use_karras_sigmas=lowerCAmelCase__)
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.2248) < 1e-3
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.full_loop(prediction_type='v_prediction')
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.1453) < 1e-3
def a_ ( self) -> Optional[Any]:
snake_case_ = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=lowerCAmelCase__)
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.0649) < 1e-3
def a_ ( self) -> Optional[int]:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(thresholding=lowerCAmelCase__, dynamic_thresholding_ratio=0)
snake_case_ = scheduler_class(**lowerCAmelCase__)
snake_case_ = 10
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase__)
for i, t in enumerate(scheduler.timesteps):
snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
assert sample.dtype == torch.floataa
'''simple docstring'''
from math import ceil
def assert_device_map( device_map, num_blocks ):
    '''simple docstring'''
    blocks = list(range(0, num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks ) )
def get_device_map( n_layers, devices ):
    '''simple docstring'''
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks )]
    return dict(zip(devices, layers_list ) )
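# Worked example (hypothetical sizes): get_device_map(4, [0, 1]) gives
# n_blocks = ceil(4 / 2) = 2 and returns {0: [0, 1], 1: [2, 3]}.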
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__( self : Any , vertices : set[int] , edges : Mapping[EdgeT, int] ) -> None:
        self.vertices : set[int] = vertices
        self.edges : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self : Union[str, Any] , edge : EdgeT , weight : int ) -> None:
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self : Optional[Any] ) -> Graph:
        subgraph : Graph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( filename : str = "p107_network.txt" ) -> int:
    '''simple docstring'''
    script_folder : str = os.path.abspath(os.path.dirname(__file__ ) )
    network_file : str = os.path.join(script_folder , filename )
    edges : dict[EdgeT, int] = {}
    with open(network_file ) as f:
        data = f.read().strip().split('''\n''' )
        adjaceny_matrix = [line.split(''',''' ) for line in data]
    for edge1 in range(1 , len(adjaceny_matrix ) ):
        for edge2 in range(edge1 ):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2] )
    graph : Graph = Graph(set(range(len(adjaceny_matrix ) ) ) , edges )
    subgraph : Graph = graph.prims_algorithm()
    initial_total : int = sum(graph.edges.values() )
    optimal_total : int = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
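# Prim's algorithm grows the spanning tree one cheapest crossing edge at a time;
# the answer is the weight saved: sum of all edge weights minus the MST's weight.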
def counting_sort( collection ) -> list:
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ) -> str:
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(counting_sort(unsorted))
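# Counting sort runs in O(n + k) time and space, where k = coll_max - coll_min + 1;
# the reversed final loop is what keeps the sort stable.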
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def create_and_check_xlm_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
model = XLMForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids )
result = model(input_ids , labels=sequence_labels )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def create_and_check_xlm_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
config.num_labels = self.num_labels
model = XLMForTokenClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_xlm_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
config.num_choices = self.num_choices
model = XLMForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
inputs_dict['start_positions'] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict['end_positions'] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = XLMModelTester(self )
self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=3_7 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_xlm_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*config_and_inputs )
def test_xlm_lm_head( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
def test_xlm_simple_qa( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
def test_xlm_qa( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
def test_xlm_sequence_classif( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
def test_xlm_token_classif( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
def test_xlm_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
def _check_attentions_for_generate(
self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
self.assertIsInstance(attentions , tuple )
self.assertListEqual(
[isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(attentions ):
# adds PAD dummy token
tgt_len = min_length + idx + 1
src_len = min_length + idx + 1
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
def _check_hidden_states_for_generate(
self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
self.assertIsInstance(hidden_states , tuple )
self.assertListEqual(
[isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(hidden_states ):
# adds PAD dummy token
seq_len = min_length + idx + 1
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
def test_model_from_pretrained( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XLMModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest( unittest.TestCase ):
@slow
def test_lm_generate_xlm_mlm_en_2048( self ):
model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(torch_device )
input_ids = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=torch_device )  # the president
expected_output_ids = [1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7]  # "the president" repeated ten times
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
output_ids = model.generate(input_ids , do_sample=False )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 322
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__a: int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase ( BaseImageProcessor ):
'''simple docstring'''
model_input_names = ["pixel_values"]
def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ) -> None:
super().__init__(**kwargs )
size = size if size is not None else {'''shortest_edge''': 224}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size , param_name='''size''' , default_to_square=False )
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image ) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {'''pixel_values''': images}
return BatchFeature(data=data , tensor_type=return_tensors )
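# A minimal, hedged usage sketch (the image path is an assumption, not part of
# this file): with the defaults above, one PIL image comes out as a single
# channels-first 224x224 batch entry.
#
#   from PIL import Image
#   processor = UpperCAmelCase()
#   batch = processor.preprocess(Image.open("cat.png"), return_tensors="np")
#   assert batch["pixel_values"].shape == (1, 3, 224, 224)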
| 214
|
'''simple docstring'''
from manim import *
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : int = Rectangle(height=0.5 , width=0.5 )
lowercase__ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 )
lowercase__ : Tuple = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowercase__ : str = [mem.copy() for i in range(6 )]
lowercase__ : Dict = [mem.copy() for i in range(6 )]
lowercase__ : Tuple = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : str = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : str = Text('''CPU''' , font_size=24 )
lowercase__ : List[Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
lowercase__ : Any = [mem.copy() for i in range(4 )]
lowercase__ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Optional[int] = Text('''GPU''' , font_size=24 )
lowercase__ : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
lowercase__ : int = [mem.copy() for i in range(6 )]
lowercase__ : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Any = Text('''Model''' , font_size=24 )
lowercase__ : Optional[Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
lowercase__ : int = []
lowercase__ : int = []
lowercase__ : Any = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
lowercase__ : Optional[int] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCAmelCase , buff=0.0 )
self.add(__lowerCAmelCase )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase )
lowercase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowercase__ : List[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Optional[Any] = Text('''Loaded Checkpoint''' , font_size=24 )
lowercase__ : Dict = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCAmelCase )
lowercase__ : str = []
lowercase__ : List[str] = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase__ : List[str] = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 )
target.move_to(__lowerCAmelCase )
ckpt_arr.append(__lowerCAmelCase )
lowercase__ : Any = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
lowercase__ : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ : Optional[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : str = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
lowercase__ : Union[str, Any] = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase__ : Tuple = [meta_mem.copy() for i in range(6 )]
lowercase__ : Any = [meta_mem.copy() for i in range(6 )]
lowercase__ : str = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Union[str, Any] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Union[str, Any] = Text('''Disk''' , font_size=24 )
lowercase__ : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) )
lowercase__ : Tuple = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(FadeOut(__lowerCAmelCase ) )
lowercase__ : Dict = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase ) , )
self.wait()
| 214
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
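# Note (added summary, not original code): with sys.modules patched to a
# _LazyModule, `from transformers.models.megatron_bert import MegatronBertModel`
# only triggers the heavy torch import on first attribute access; the
# TYPE_CHECKING branch above exists purely so static analyzers see real symbols.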
| 16
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(hidden_sizes )
def prepare_config_and_inputs( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def create_and_check_model( self , config , pixel_values , labels ):
"""simple docstring"""
model = TFResNetModel(config=config )
result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
"""simple docstring"""
config.num_labels = self.num_labels
model = TFResNetForImageClassification(config )
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def setUp( self ):
"""simple docstring"""
self.model_tester = TFResNetModelTester(self )
self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def test_model_common_attributes( self ):
"""simple docstring"""
pass
def test_forward_signature( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_hidden_states_output( self ):
"""simple docstring"""
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict['''output_hidden_states'''] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def test_for_image_classification( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFResNetModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img():
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head( self ):
"""simple docstring"""
model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''tf''' )
# forward pass
outputs = model(**inputs )
# verify the logits
expected_shape = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 16
| 1
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
"""simple docstring"""
model_type: str = field(
default=None , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(MODEL_TYPES )} )
data_dir: str = field(
default=None , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
max_seq_length: int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
doc_stride: int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
max_query_length: int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
max_answer_length: int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
overwrite_cache: bool = field(
default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
version_2_with_negative: bool = field(
default=False , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
null_score_diff_threshold: float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
n_best_size: int = field(
default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
lang_id: int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
threads: int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class Split(Enum ):
"""simple docstring"""
train = '''train'''
dev = '''dev'''
class SquadDataset(Dataset ):
"""simple docstring"""
args: SquadDataTrainingArguments
features: List[SquadFeatures]
mode: Split
is_language_sensitive: bool
def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ):
self.args = args
self.is_language_sensitive = is_language_sensitive
self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
self.mode = mode
# Load data features from cache or dataset file
version_tag = "v2" if args.version_2_with_negative else "v1"
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.old_features = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
self.features = self.old_features["features"]
self.dataset = self.old_features.get("dataset" , None )
self.examples = self.old_features.get("examples" , None )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
" future run" )
else:
if mode == Split.dev:
self.examples = self.processor.get_dev_examples(args.data_dir )
else:
self.examples = self.processor.get_train_examples(args.data_dir )
self.features, self.dataset = squad_convert_examples_to_features(
examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
start = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
return len(self.features )
def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
feature = self.features[i]
input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
start_positions = torch.tensor(feature.start_position , dtype=torch.long )
end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
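# Hedged usage sketch (batch size is an arbitrary choice, not from this file):
# because __getitem__ returns a dict of fixed-shape tensors, the dataset
# composes directly with the default collate_fn of torch.utils.data.DataLoader.
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=8, shuffle=True)
#   batch = next(iter(loader))  # e.g. batch["input_ids"].shape == (8, max_seq_length)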
| 305
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float ) -> float:
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def surface_area_cuboid(length: float , breadth: float , height: float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float ) -> float:
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float ) -> float:
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def surface_area_cone(radius: float , height: float ) -> float:
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float , radius_2: float , height: float ) -> float:
if radius_1 < 0 or radius_2 < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float , height: float ) -> float:
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float , tube_radius: float ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle(length: float , width: float ) -> float:
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def area_square(side_length: float ) -> float:
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def area_triangle(base: float , height: float ) -> float:
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def area_triangle_three_sides(side_1: float , side_2: float , side_3: float ) -> float:
if side_1 < 0 or side_2 < 0 or side_3 < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
raise ValueError("Given three sides do not form a triangle" )
# Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), s = semi-perimeter
semi_perimeter = (side_1 + side_2 + side_3) / 2
area = sqrt(
semi_perimeter
* (semi_perimeter - side_1)
* (semi_perimeter - side_2)
* (semi_perimeter - side_3) )
return area
def area_parallelogram(base: float , height: float ) -> float:
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def area_trapezium(base_1: float , base_2: float , height: float ) -> float:
if base_1 < 0 or base_2 < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float ) -> float:
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def area_ellipse(radius_x: float , radius_y: float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float , diagonal_2: float ) -> float:
if diagonal_1 < 0 or diagonal_2 < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int , length: float ) -> float:
if not isinstance(sides , int ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
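# A few hand-checkable values (added sanity notes, not from the original file):
# >>> surface_area_cube(1)
# 6
# >>> area_triangle_three_sides(3, 4, 5)  # 3-4-5 right triangle
# 6.0
# >>> area_circle(1) == pi
# True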
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 305
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_bartpho'''] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 112
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = '''base_with_context'''
def load_notes_encoder( weights , model ):
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__SCREAMING_SNAKE_CASE : Tuple = weights[F"layers_{lyr_num}"]
__SCREAMING_SNAKE_CASE : str = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Optional[int] = ly_weight["""attention"""]
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder( weights , model ):
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__SCREAMING_SNAKE_CASE : Tuple = weights[F"layers_{lyr_num}"]
__SCREAMING_SNAKE_CASE : Optional[int] = ly_weight["""attention"""]
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder( weights , model ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__SCREAMING_SNAKE_CASE : str = weights[F"layers_{lyr_num}"]
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = ly_weight["""self_attention"""]
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = ly_weight["""MultiHeadDotProductAttention_0"""]
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main( args ):
ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
gin_overrides = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
gin_file = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , notes_encoder )
continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , continuous_encoder )
decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , decoder )
melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
pipe = SpectrogramDiffusionPipeline(
notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()
    main(args)
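    # Usage sketch (illustrative, not part of the conversion script): once saved,
    # the pipeline can be reloaded and driven by a MIDI file via diffusers'
    # MidiProcessor. The paths below are placeholder assumptions.
    #
    #   from diffusers import MidiProcessor, SpectrogramDiffusionPipeline
    #
    #   pipe = SpectrogramDiffusionPipeline.from_pretrained("path/to/output")
    #   processor = MidiProcessor()
    #   output = pipe(processor("example.mid"))
    #   audio = output.audios[0]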
| 112
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
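# Quick illustration (an added sanity sketch, not part of the original module) of
# the `attribute_map` indirection above: generic config names resolve to the
# DistilBERT-specific fields.
if __name__ == "__main__":
    config = DistilBertConfig()
    assert config.hidden_size == config.dim == 768
    assert config.num_hidden_layers == config.n_layers == 6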
| 355
|
"""simple docstring"""
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of that divisor
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    print(f"{solution() = }")
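    # Hand-checkable sanity cases (illustrative additions):
    # 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
    assert solution(13_195) == 29
    # A prime input is its own largest prime factor.
    assert solution(17) == 17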
| 57
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which would fail the CI.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
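# For reference, a doctest that exercises the custom IGNORE_RESULT flag registered
# above would look like the following (a hypothetical example, not part of the
# real conftest): the expected-output line is accepted regardless of its value.
#
#   def fetch_timestamp():
#       """
#       >>> fetch_timestamp()  # doctest: +IGNORE_RESULT
#       1690000000.0
#       """
#       import time
#
#       return time.time()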
| 222
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 222
| 1
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
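# Usage sketch (illustrative; the default checkpoints are downloaded on first
# use): these classes are normally reached through the `pipeline` factory.
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization")         # -> SummarizationPipeline
#   translator = pipeline("translation_en_to_fr")  # -> TranslationPipeline
#   summarizer("Long article text ...", max_length=30, min_length=5)
#   translator("How old are you?")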
| 369
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """
    Counts the words in words.txt whose alphabetical value is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
    print(solution())
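    # Hand-checkable example of the word-value computation (an illustrative
    # addition): "SKY" -> 19 + 11 + 25 = 55, the 10th triangular number.
    word_value = sum(ord(letter) - 64 for letter in "SKY")
    assert word_value == 55
    assert word_value in TRIANGULAR_NUMBERS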
| 331
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how often (every how many layers) an encoder layer is a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how often (every how many layers) a decoder layer is a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
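# Minimal sanity sketch (not part of the original module): checks the sparse-step
# arithmetic above. With 12 layers and 3 sparse layers, every 4th layer of the
# encoder and of the decoder is a sparse MoE layer.
if __name__ == "__main__":
    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    assert config.encoder_sparse_step == 4
    assert config.decoder_sparse_step == 4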
| 214
|
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 214
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371
|
def odd_even_sort(input_list: list) -> list:
    """Sorts a list in place using the odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 196
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We need to test on relatively large models (aka >1b parameters), otherwise
    # the quantization may not work as expected.
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 305
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 305
| 1
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
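    # Aside (an illustrative sanity check, not in the original script):
    # `dtype_byte_size` is what the sharding loop above uses to turn element
    # counts into bytes, e.g. 4 bytes per float32 parameter.
    assert dtype_byte_size(torch.float32) == 4
    assert dtype_byte_size(torch.float16) == 2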
| 198
|
import argparse
import struct
import unittest
class SHA256:
    """Class that contains the entire pipeline for SHA-256 hashing."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09E_667,
0xBB_67A_E85,
0x3C_6EF_372,
0xA5_4FF_53A,
0x51_0E5_27F,
0x9B_056_88C,
0x1F_83D_9AB,
0x5B_E0C_D19,
]
        # Initialize round constants
        self.round_constants = [
0x42_8A2_F98,
0x71_374_491,
0xB5_C0F_BCF,
0xE9_B5D_BA5,
0x39_56C_25B,
0x59_F11_1F1,
0x92_3F8_2A4,
0xAB_1C5_ED5,
0xD8_07A_A98,
0x12_835_B01,
0x24_318_5BE,
0x55_0C7_DC3,
0x72_BE5_D74,
0x80_DEB_1FE,
0x9B_DC0_6A7,
0xC1_9BF_174,
0xE4_9B6_9C1,
0xEF_BE4_786,
0x0F_C19_DC6,
0x24_0CA_1CC,
0x2D_E92_C6F,
0x4A_748_4AA,
0x5C_B0A_9DC,
0x76_F98_8DA,
0x98_3E5_152,
0xA8_31C_66D,
0xB0_032_7C8,
0xBF_597_FC7,
0xC6_E00_BF3,
0xD5_A79_147,
0x06_CA6_351,
0x14_292_967,
0x27_B70_A85,
0x2E_1B2_138,
0x4D_2C6_DFC,
0x53_380_D13,
0x65_0A7_354,
0x76_6A0_ABB,
0x81_C2C_92E,
0x92_722_C85,
0xA2_BFE_8A1,
0xA8_1A6_64B,
0xC2_4B8_B70,
0xC7_6C5_1A3,
0xD1_92E_819,
0xD6_990_624,
0xF4_0E3_585,
0x10_6AA_070,
0x19_A4C_116,
0x1E_376_C08,
0x27_487_74C,
0x34_B0B_CB5,
0x39_1C0_CB3,
0x4E_D8A_A4A,
0x5B_9CC_A4F,
0x68_2E6_FF3,
0x74_8F8_2EE,
0x78_A56_36F,
0x84_C87_814,
0x8C_C70_208,
0x90_BEF_FFA,
0xA4_506_CEB,
0xBE_F9A_3F7,
0xC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data so its length is a multiple of 64 bytes (512 bits)."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Hash either a string passed with -s or the contents of a file passed with -f."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
    main()
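    # Quick illustrative check (an addition, not part of the original module):
    # right-rotating the 32-bit value 1 by one position moves the low bit to the
    # top bit, i.e. 0x80000000.
    assert SHA256(b"").ror(1, 1) == 0x80000000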
| 198
| 1
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
    import doctest

    doctest.testmod()
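    # Worked example (an illustrative addition): two 2-ohm resistors give
    # 1 ohm in parallel and 4 ohms in series.
    assert resistor_parallel([2, 2]) == 1.0
    assert resistor_series([2, 2]) == 4.0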
| 132
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Returns the edges of a minimum spanning tree using Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A : Optional[Any] = int(input("Enter number of edges: ").strip())
A : Dict = defaultdict(list)
for _ in range(edges_number):
A : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
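# Worked example (illustrative, traced by hand against the implementation
# above): for a weighted triangle where the 0-2 edge is the most expensive,
# Prim's algorithm keeps the two cheap edges.
# >>> prisms_algorithm({0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]})
# [(0, 1), (1, 2)]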
"""simple docstring"""
def a__ ( snake_case__ ) -> List[Any]:
lowerCamelCase = [0] * len(snake_case__ )
lowerCamelCase = []
lowerCamelCase = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
lowerCamelCase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCamelCase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
lowerCAmelCase : List[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
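# Illustrative note: for the adjacency list above, the longest path is
# 0 -> 2 -> 5 -> 6 -> 7, which visits 5 vertices, so the script prints 5
# (long_dist counts vertices on the path, not edges).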
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = False, False, False
@dataclass
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = "dict"
__UpperCamelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__UpperCamelCase = field(default="Audio" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCamelCase = BytesIO()
sf.write(_a , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCamelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
lowerCamelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
lowerCamelCase = BytesIO(bytes() )
sf.write(_a , _a , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCamelCase , lowerCamelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCamelCase = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCamelCase = token_per_repo_id or {}
lowerCamelCase = path.split("""::""" )[-1]
try:
lowerCamelCase = string_to_dict(_a , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCamelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCamelCase = None
with xopen(_a , """rb""" , use_auth_token=_a ) as f:
lowerCamelCase , lowerCamelCase = sf.read(_a )
else:
lowerCamelCase , lowerCamelCase = sf.read(_a )
lowerCamelCase = array.T
if self.mono:
lowerCamelCase = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCamelCase = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
lowerCamelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowerCAmelCase ( self ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCamelCase = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCamelCase = storage.field("""bytes""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCamelCase = storage.field("""path""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a , """rb""" ) as f:
lowerCamelCase = f.read()
return bytes_
lowerCamelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
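# Illustrative usage sketch (not part of this module; assumes `soundfile` is
# installed and a 1-second silent clip is acceptable test input):
#   feature = Audio(sampling_rate=16_000)
#   encoded = feature.encode_example({"array": np.zeros(16_000), "sampling_rate": 16_000})
#   decoded = feature.decode_example(encoded)  # {"path", "array", "sampling_rate"}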
"""Official evaluation script for SQuAD version 2.0."""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
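# Worked example (illustrative): for gold answer "the cat sat" and prediction
# "cat sat down", normalization drops the article "the", leaving gold tokens
# ["cat", "sat"] and predicted tokens ["cat", "sat", "down"]. Then num_same = 2,
# precision = 2/3, recall = 2/2 = 1.0, and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.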
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
"""Tests for the PokerHand class (Project Euler problem 54)."""
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sol1 import PokerHand

SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
    ('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
    ('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
    ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
    ('''JH AH TH KH QH''', 23),
    ('''JH 9H TH KH QH''', 22),
    ('''JC KH JS JD JH''', 21),
    ('''KH KC 3S 3H 3D''', 20),
    ('''8C 9C 5C 3C TC''', 19),
    ('''JS QS 9H TS KH''', 18),
    ('''7C 7S KH 2H 7H''', 17),
    ('''3C KH 5D 5S KH''', 16),
    ('''QH 8H KD JH 8S''', 15),
    ('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
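# Illustrative invocation (assumes this file sits next to sol1.py and
# poker_hands.txt inside a package, as the relative import above requires):
#   python -m pytest test_poker_hand.py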
"""Scrape current worldwide COVID-19 statistics from worldometers.info."""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Decompress files produced by the matching naive LZW compressor."""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompresses given data_bits using the Lempel-Ziv-Welch algorithm
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = 2

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # Every time the dictionary size crosses a power of two, codes grow by
        # one bit, so re-key all existing entries with a leading "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix that a compressed file should have
    and returns the remaining bits
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    # Removing the encoded size prefix and its delimiter bit
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]

    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads source file, decompresses it and writes the result in destination file
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
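# Illustrative CLI usage (file names are placeholders; the input must have been
# produced by the matching LZW compressor, which prepends the size header that
# remove_prefix() strips):
#   python lempel_ziv_decompress.py compressed.lz restored.bin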
"""
Fine-tune GPT-2 with Information Gain Filtration (IGF): a secondary learner
predicts the information gain of each training context and filters the batch.
"""
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.

                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
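# Illustrative note: running `python run_clm_igf.py` executes the three IGF
# stages above (pair collection, secondary-learner training, filtered
# fine-tuning) with the literal settings hard-coded in main(); the argparse
# options are declared but their parsed values are not wired into the calls.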
"""Stooge sort: a deliberately inefficient recursive comparison sort."""


def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = int((h - i + 1) / 3)

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
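# Illustrative example: stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].
# The algorithm runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, so it is a
# teaching curiosity rather than a practical sort.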
"""Tests for the Whisper feature extractor."""
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
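# To run this suite (illustrative; the relative mixin import means the file
# must live inside the transformers test tree, e.g. under tests/models/whisper/):
#   python -m pytest tests/models/whisper/test_feature_extraction_whisper.py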
"""Hash table where collisions are resolved by chaining values in deques."""
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
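# Illustrative sketch (assumes the HashTable base class from hash_table.py
# exposes insert_data() plus the values/charge_factor attributes used above):
#   table = HashTableWithLinkedList(size_table=3, charge_factor=2)
#   table.insert_data(17)  # colliding keys accumulate in a per-slot deque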
"""Principal Component Analysis (PCA) and Linear Discriminant Analysis (LDA)."""
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset onto the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
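# Quick shape check (illustrative): for the 3-feature, 5-sample dummy dataset
# used in the tests above, principal_component_analysis(features, 2) projects
# onto the top two principal components and returns an array of shape (2, 5).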
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __snake_case ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
def __init__( self , lowercase=None , **lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__(features=lowercase)
a__: List[Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
import torch
if isinstance(lowercase , lowercase) and column:
if all(
isinstance(lowercase , torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column):
return torch.stack(lowercase)
return column
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
import torch
if isinstance(lowercase , (str, bytes, type(lowercase))):
return value
elif isinstance(lowercase , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
a__: List[Any] = {}
if isinstance(lowercase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
a__: Any = {'dtype': torch.intaa}
elif isinstance(lowercase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
a__: Optional[Any] = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowercase , PIL.Image.Image):
a__: Any = np.asarray(lowercase)
return torch.tensor(lowercase , **{**default_dtype, **self.torch_tensor_kwargs})
def lowerCamelCase_ ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(lowercase , '__array__') and not isinstance(lowercase , torch.Tensor):
a__: Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowercase , np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(lowercase , (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
return map_nested(self._recursive_tensorize , lowercase , map_list=lowercase)
def lowerCamelCase_ ( self , lowercase) -> Mapping:
'''simple docstring'''
a__: Dict = self.numpy_arrow_extractor().extract_row(lowercase)
a__: Optional[Any] = self.python_features_decoder.decode_row(lowercase)
return self.recursive_tensorize(lowercase)
def lowerCamelCase_ ( self , lowercase) -> "torch.Tensor":
'''simple docstring'''
a__: Optional[Any] = self.numpy_arrow_extractor().extract_column(lowercase)
a__: List[str] = self.python_features_decoder.decode_column(lowercase , pa_table.column_names[0])
a__: Optional[int] = self.recursive_tensorize(lowercase)
a__: str = self._consolidate(lowercase)
return column
def lowerCamelCase_ ( self , lowercase) -> Mapping:
'''simple docstring'''
a__: Optional[Any] = self.numpy_arrow_extractor().extract_batch(lowercase)
a__: Dict = self.python_features_decoder.decode_batch(lowercase)
a__: Union[str, Any] = self.recursive_tensorize(lowercase)
for column_name in batch:
a__: Optional[int] = self._consolidate(batch[column_name])
return batch
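# Usage sketch (an assumption based on how the `datasets` library registers its
# formatters; this class is normally reached via `Dataset.set_format("torch")`
# rather than instantiated directly):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#     ds.set_format("torch")
#     ds[0]["x"]  # -> tensor([1, 2])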
| 203
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple:
a__: Tuple = {}
a__: Tuple = job['started_at']
a__: int = job['completed_at']
a__: Any = date_parser.parse(_SCREAMING_SNAKE_CASE )
a__: Tuple = date_parser.parse(_SCREAMING_SNAKE_CASE )
a__: str = round((end_datetime - start_datetime).total_seconds() / 60.0 )
a__: Any = start
a__: Dict = end
a__: Optional[int] = duration_in_min
return job_info
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: Tuple = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: int = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
        job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: str = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
            job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
return job_time
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
lowercase__ = parser.parse_args()
lowercase__ = get_job_time(args.workflow_run_id)
lowercase__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"{k}: {v['duration']}")
| 203
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a ( unittest.TestCase ):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=30 , __magic_name__=4_00 , __magic_name__=True , __magic_name__=None , __magic_name__=0.9 , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , ) -> List[Any]:
_a = size if size is not None else {'shortest_edge': 30}
_a = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_a = parent
_a = batch_size
_a = num_channels
_a = min_resolution
_a = max_resolution
_a = do_resize_and_center_crop
_a = size
_a = crop_pct
_a = crop_size
_a = do_normalize
_a = image_mean
_a = image_std
def __UpperCAmelCase ( self ) -> Any:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ) -> str:
_a = PoolFormerImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(__magic_name__ , 'size' ) )
self.assertTrue(hasattr(__magic_name__ , 'crop_pct' ) )
self.assertTrue(hasattr(__magic_name__ , 'do_normalize' ) )
self.assertTrue(hasattr(__magic_name__ , 'image_mean' ) )
self.assertTrue(hasattr(__magic_name__ , 'image_std' ) )
def __UpperCAmelCase ( self ) -> Tuple:
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __UpperCAmelCase ( self ) -> Dict:
pass
def __UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_a = image_processing(__magic_name__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __UpperCAmelCase ( self ) -> int:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_a = image_processing(__magic_name__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __UpperCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_a = image_processing(__magic_name__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 168
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """openai/whisper-base"""
_lowerCAmelCase = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_lowerCAmelCase = """transcriber"""
_lowerCAmelCase = WhisperProcessor
_lowerCAmelCase = WhisperForConditionalGeneration
_lowerCAmelCase = ["""audio"""]
_lowerCAmelCase = ["""text"""]
def __UpperCAmelCase ( self , __magic_name__ ) -> Union[str, Any]:
return self.pre_processor(__magic_name__ , return_tensors='pt' ).input_features
def __UpperCAmelCase ( self , __magic_name__ ) -> Any:
return self.model.generate(inputs=__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> List[str]:
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
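# Usage sketch (hypothetical; upstream this tool is called SpeechToTextTool,
# and `PipelineTool.__call__` chains encode -> forward -> decode, so
# transcription is a single call on a raw waveform):
#
#     tool = SpeechToTextTool()
#     text = tool(audio)  # `audio` is a 1-D waveform array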
| 168
| 1
|
import requests
from bs4 import BeautifulSoup
def __snake_case ( __UpperCamelCase : str = "AAPL" ):
"""simple docstring"""
A_ = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
A_ = BeautifulSoup(requests.get(__UpperCamelCase ).text ,"html.parser" )
A_ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" ,class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 329
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329
| 1
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="attention" ):
"""simple docstring"""
    _lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
_lowerCAmelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
_lowerCAmelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
_lowerCAmelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
_lowerCAmelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
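# Shape note (added for clarity; the layout is inferred from the slices and
# reshapes above): the k/q/v kernels are stored as
# (d_model, n_layers, n_heads, d_head) and come out as 2-D
# (d_model, n_heads * d_head) matrices for layer `i`, while the output kernel
# is stored as (n_heads, n_layers, d_head, d_model) and is flattened the other
# way, to (n_heads * d_head, d_model).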
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
if split_mlp_wi:
_lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
_lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
_lowerCAmelCase = (wi_a, wi_a)
else:
_lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
_lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def UpperCamelCase__ ( lowerCAmelCase , *, lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ):
"""simple docstring"""
_lowerCAmelCase = traverse_util.flatten_dict(variables["""target"""] )
_lowerCAmelCase = {"""/""".join(lowerCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_lowerCAmelCase = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase )
_lowerCAmelCase = collections.OrderedDict()
# Shared embeddings.
_lowerCAmelCase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , """pre_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , """attention""" )
_lowerCAmelCase = layer_norm
_lowerCAmelCase = k.T
_lowerCAmelCase = o.T
_lowerCAmelCase = q.T
_lowerCAmelCase = v.T
# Block i, layer 1 (MLP).
_lowerCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , """pre_mlp_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase = tax_mlp_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , lowerCAmelCase )
_lowerCAmelCase = layer_norm
if split_mlp_wi:
_lowerCAmelCase = wi[0].T
_lowerCAmelCase = wi[1].T
else:
_lowerCAmelCase = wi.T
_lowerCAmelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase = tax_relpos_bias_lookup(
lowerCAmelCase , lowerCAmelCase , """encoder""" ).T
_lowerCAmelCase = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
_lowerCAmelCase = tax_relpos_bias_lookup(
lowerCAmelCase , 0 , """encoder""" ).T
_lowerCAmelCase = tax_relpos_bias_lookup(
lowerCAmelCase , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """pre_self_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """self_attention""" )
_lowerCAmelCase = layer_norm
_lowerCAmelCase = k.T
_lowerCAmelCase = o.T
_lowerCAmelCase = q.T
_lowerCAmelCase = v.T
# Block i, layer 1 (Cross Attention).
_lowerCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """encoder_decoder_attention""" )
_lowerCAmelCase = layer_norm
_lowerCAmelCase = k.T
_lowerCAmelCase = o.T
_lowerCAmelCase = q.T
_lowerCAmelCase = v.T
# Block i, layer 2 (MLP).
_lowerCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """pre_mlp_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase = tax_mlp_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , lowerCAmelCase )
_lowerCAmelCase = layer_norm
if split_mlp_wi:
_lowerCAmelCase = wi[0].T
_lowerCAmelCase = wi[1].T
else:
_lowerCAmelCase = wi.T
_lowerCAmelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase = tax_relpos_bias_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" ).T
_lowerCAmelCase = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_lowerCAmelCase = old["""decoder/logits_dense/kernel"""].T
return new
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
_lowerCAmelCase = state_dict["""shared.weight"""]
return state_dict
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
    _lowerCAmelCase = checkpoints.load_t5x_checkpoint(lowerCAmelCase )
_lowerCAmelCase = convert_tax_to_pytorch(
lowerCAmelCase , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase , scalable_attention=lowerCAmelCase )
_lowerCAmelCase = make_state_dict(lowerCAmelCase , lowerCAmelCase )
model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = False , ):
"""simple docstring"""
    _lowerCAmelCase = MT5Config.from_json_file(lowerCAmelCase )
print(f"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
        _lowerCAmelCase = UMT5EncoderModel(lowerCAmelCase )
else:
        _lowerCAmelCase = UMT5ForConditionalGeneration(lowerCAmelCase )
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(lowerCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase )
print("""Done""" )
if __name__ == "__main__":
A__ : Tuple =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
A__ : Union[str, Any] =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 70
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Optional[int] = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase : str = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[int] = in_proj_bias[-config.hidden_size :]
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
        state_dict.pop(k , None )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[Any] = dct.pop(__magic_name__ )
lowercase : Union[str, Any] = val
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = ViTMSNConfig()
lowercase : str = 10_00
lowercase : List[str] = '''datasets/huggingface/label-files'''
lowercase : List[str] = '''imagenet-1k-id2label.json'''
lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) )
    lowercase : Union[str, Any] = {int(k ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
lowercase : int = ViTMSNModel(__magic_name__ )
lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder''']
lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(__magic_name__ )
lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : Dict = ViTImageProcessor(
size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ )
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**__magic_name__ )
lowercase : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308
| 0
|
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase :Optional[Any] = []
UpperCamelCase :Tuple = (
f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
UpperCamelCase :int = subprocess.run(__magic_name__ , shell=__magic_name__ , stdout=subprocess.PIPE )
UpperCamelCase :Tuple = output.stdout.decode("""utf-8""" )
UpperCamelCase :Optional[int] = json.loads(__magic_name__ )
UpperCamelCase :Tuple = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
                offline_runners.append(runner )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(__magic_name__ ) )
if len(__magic_name__ ) > 0:
UpperCamelCase :List[str] = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return values.split(""",""" )
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
UpperCAmelCase_ : Any = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 62
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : str ):
UpperCamelCase :Dict = """hf-internal-testing/tiny-random-t5"""
UpperCamelCase :Optional[int] = AutoTokenizer.from_pretrained(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
UpperCamelCase :List[Any] = tokenizer("""This is me""" , return_tensors="""pt""" )
UpperCamelCase :int = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCamelCase :List[Any] = model.generate(**__lowerCamelCase )
UpperCamelCase :List[Any] = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
            UpperCamelCase :List[str] = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCamelCase :List[Any] = model_reloaded.generate(**__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )
def _A ( self : Optional[int] ):
UpperCamelCase :Dict = """hf-internal-testing/tiny-random-t5"""
        UpperCamelCase :Optional[int] = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
UpperCamelCase :List[Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
UpperCamelCase :int = model.reverse_bettertransformer()
model.save_pretrained(__lowerCamelCase )
| 62
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
__a = """huggingface/label-files"""
__a = """imagenet-1k-id2label.json"""
__a = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
    __a = {int(k ): v for k, v in idalabel.items()}
__a = {v: k for k, v in idalabel.items()}
__a = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
__a = BitConfig(
        conv_layer=__lowerCamelCase , num_labels=1000 , id2label=__lowerCamelCase , label2id=__lowerCamelCase , )
return config
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if "stem.conv" in name:
__a = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
__a = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
__a = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
__a = """bit.""" + name
if "bit" not in name and "classifier" not in name:
__a = """bit.encoder.""" + name
return name
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__a = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=False ):
"""simple docstring"""
__a = get_config(__lowerCamelCase )
# load original model from timm
__a = create_model(__lowerCamelCase , pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model
__a = timm_model.state_dict()
for key in state_dict.copy().keys():
        __a = state_dict.pop(key )
__a = val.squeeze() if """head""" in key else val
# load HuggingFace model
__a = BitForImageClassification(__lowerCamelCase )
model.eval()
model.load_state_dict(__lowerCamelCase )
# create image processor
__a = create_transform(**resolve_data_config({} , model=__lowerCamelCase ) )
__a = transform.transforms
__a = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
__a = BitImageProcessor(
do_resize=__lowerCamelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowerCamelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__a = prepare_img()
__a = transform(__lowerCamelCase ).unsqueeze(0 )
__a = processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__lowerCamelCase , __lowerCamelCase )
# verify logits
with torch.no_grad():
__a = model(__lowerCamelCase )
__a = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
__a = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"Pushing model {model_name} and processor to the hub" )
model.push_to_hub(f"ybelkada/{model_name}" )
processor.push_to_hub(f"ybelkada/{model_name}" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCamelCase__ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a : List[str] = "src/diffusers"
a : str = "."
# This is to make sure the diffusers module imported is the one in the repo.
a : Tuple = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
a : List[str] = spec.loader.load_module()
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Tuple ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , __lowerCamelCase ) is not None
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[int] = object_name.split(""".""" )
__UpperCAmelCase : List[Any] = 0
# First let's find the module where our object lives.
__UpperCAmelCase : Optional[Any] = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase , f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__UpperCAmelCase : Optional[Any] = f.readlines()
# Now let's find the class / func in the code!
__UpperCAmelCase : List[str] = """"""
__UpperCAmelCase : int = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__UpperCAmelCase : List[str] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index] , __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__UpperCAmelCase : Dict = lines[start_index:line_index]
return "".join(__lowerCamelCase )
a : Any = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
a : Optional[int] = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
a : Dict = re.compile(r"<FILL\s+[^>]*>")
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
__UpperCAmelCase : Optional[Any] = code.split("""\n""" )
__UpperCAmelCase : str = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ ( __lowerCamelCase : List[str] ):
__UpperCAmelCase : Tuple = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
__UpperCAmelCase : Optional[Any] = f"""class Bla:\n{code}"""
    __UpperCAmelCase : Dict = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=__lowerCamelCase )
__UpperCAmelCase : Dict = black.format_str(__lowerCamelCase , mode=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Any = style_docstrings_in_code(__lowerCamelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=False ):
with open(__lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__UpperCAmelCase : Optional[Any] = f.readlines()
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
__UpperCAmelCase : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = search.groups()
__UpperCAmelCase : Any = find_code_in_diffusers(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = get_indent(__lowerCamelCase )
__UpperCAmelCase : Tuple = line_index + 1 if indent == theoretical_indent else line_index + 2
__UpperCAmelCase : Any = theoretical_indent
__UpperCAmelCase : Any = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
__UpperCAmelCase : int = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
__UpperCAmelCase : List[Any] = lines[line_index]
__UpperCAmelCase : str = _should_continue(__lowerCamelCase , __lowerCamelCase ) and re.search(f"""^{indent}# End copy""" , __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__UpperCAmelCase : Optional[int] = lines[start_index:line_index]
__UpperCAmelCase : int = """""".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
        __UpperCAmelCase : Tuple = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
__UpperCAmelCase : List[Any] = """\n""".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
__UpperCAmelCase : List[str] = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            __UpperCAmelCase : Any = [_re_replace_pattern.search(p ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = pattern.groups()
__UpperCAmelCase : List[str] = re.sub(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if option.strip() == "all-casing":
__UpperCAmelCase : List[Any] = re.sub(obja.lower() , obja.lower() , __lowerCamelCase )
__UpperCAmelCase : int = re.sub(obja.upper() , obja.upper() , __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__UpperCAmelCase : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
__UpperCAmelCase : Optional[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__UpperCAmelCase : int = lines[:start_index] + [theoretical_code] + lines[line_index:]
__UpperCAmelCase : Union[str, Any] = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ ( __lowerCamelCase : bool = False ):
__UpperCAmelCase : Tuple = glob.glob(os.path.join(__lowerCamelCase , """**/*.py""" ) , recursive=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = []
for filename in all_files:
__UpperCAmelCase : str = is_copy_consistent(__lowerCamelCase , __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
__UpperCAmelCase : Union[str, Any] = """\n""".join(__lowerCamelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[int] = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 114
| 0
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase__ : Optional[int] =get_logger(__name__)
def __lowercase ( a__ , a__ , a__ , a__ , a__=0 ) -> int:
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__SCREAMING_SNAKE_CASE = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__SCREAMING_SNAKE_CASE = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(a__ , a__ )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__SCREAMING_SNAKE_CASE = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(a__ , a__ )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__SCREAMING_SNAKE_CASE = os.path.join(a__ , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(a__ , exist_ok=a__ )
logger.info(f"""Saving model to {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=a__ , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def __lowercase ( a__ , a__ , a__ , a__ , a__=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(a__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
__SCREAMING_SNAKE_CASE = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Loading model from {input_model_file}""" )
__SCREAMING_SNAKE_CASE = torch.load(a__ )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__SCREAMING_SNAKE_CASE = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Loading model from {input_model_file}""" )
__SCREAMING_SNAKE_CASE = torch.load(a__ )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__SCREAMING_SNAKE_CASE = (
os.path.join(a__ , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=a__ , storage_reader=dist_cp.FileSystemReader(a__ ) , planner=DefaultLoadPlanner() , )
__SCREAMING_SNAKE_CASE = state_dict['model']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(a__ )
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__=0 ) -> List[Any]:
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__SCREAMING_SNAKE_CASE = FSDP.optim_state_dict(a__ , a__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__SCREAMING_SNAKE_CASE = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(a__ , a__ )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
__SCREAMING_SNAKE_CASE = os.path.join(a__ , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(a__ , exist_ok=a__ )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__=0 ) -> str:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__SCREAMING_SNAKE_CASE = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__SCREAMING_SNAKE_CASE = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
__SCREAMING_SNAKE_CASE = torch.load(a__ )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
__SCREAMING_SNAKE_CASE = (
os.path.join(a__ , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(a__ ) , )
__SCREAMING_SNAKE_CASE = optim_state['optimizer']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = FSDP.optim_state_dict_to_load(a__ , a__ , a__ )
optimizer.load_state_dict(a__ )
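# Usage sketch (hypothetical; upstream `accelerate` exposes these four helpers
# as save_fsdp_model / load_fsdp_model / save_fsdp_optimizer /
# load_fsdp_optimizer, while the mangling here collapses every definition to
# `__lowercase`):
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
#     # ... later, to resume training:
#     load_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)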
| 354
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowercase ( a__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
__SCREAMING_SNAKE_CASE = [5, 5, 5, 5]
elif "fl4" in model_name:
__SCREAMING_SNAKE_CASE = [4, 4, 4, 4]
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
if "lrf" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
else:
__SCREAMING_SNAKE_CASE = [2, 2, 2, 2]
if "tiny" in model_name:
__SCREAMING_SNAKE_CASE = 96
elif "small" in model_name:
__SCREAMING_SNAKE_CASE = 96
elif "base" in model_name:
__SCREAMING_SNAKE_CASE = 1_28
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1_92
elif "xlarge" in model_name:
__SCREAMING_SNAKE_CASE = 2_56
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 3_52
# set label information
__SCREAMING_SNAKE_CASE = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__SCREAMING_SNAKE_CASE = 'imagenet-22k-id2label.json'
else:
__SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json'
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
    __SCREAMING_SNAKE_CASE = {int(k ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = FocalNetConfig(
        embed_dim=a__ , depths=a__ , focal_levels=a__ , focal_windows=a__ , use_conv_embed=a__ , id2label=a__ , label2id=a__ , use_post_layernorm=a__ , use_layerscale=a__ , )
return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
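# Example invocation (a sketch: the script filename below is assumed, everything
# else mirrors the argparse flags defined above):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny
#
# Add --push_to_hub to also upload the converted model and processor.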
| 118
| 0
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
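# To run just the fast checks in this file (a sketch; the path assumes the usual
# transformers repository layout):
#
#   pytest tests/models/esm/test_modeling_esmfold.py -k "test_model"
#
# The integration test above is gated behind @slow and only runs with RUN_SLOW=1.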
| 203
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from the smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from the mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
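# Minimal usage sketch (illustrative, not part of the original module). Outside of
# SageMaker both helpers degrade gracefully to the plain TrainingArguments behaviour:
#
#   from transformers.sagemaker import SageMakerTrainingArguments
#   args = SageMakerTrainingArguments(output_dir="./outputs")  # emits a deprecation warning
#   print(args.world_size, args.place_model_on_device)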
| 203
| 1
|
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # heap helper: index of the parent of the node at `position`
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: index of the left child of the node at `position`
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: index of the right child of the node at `position`
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    # Minimum priority queue backed by a binary heap, with a position map so that
    # the priority (weight) of an element can be decreased in O(log n).

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with the lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given element and restore the heap invariant
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement)
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement)
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap two heap nodes and keep the position map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    # Undirected, weighted graph stored as nested adjacency dictionaries

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node to the graph if it is not in it already
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    # Prim's algorithm: grow a spanning tree from an arbitrary start node, always
    # pulling in the cheapest reachable node next.
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
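# Usage sketch (added for illustration; the graph below is not part of the original
# module). In this triangle, node 3 should join the tree through node 2 (edge weight
# 2) rather than directly from node 1 (edge weight 10).
if __name__ == "__main__":
    demo_graph: GraphUndirectedWeighted[int] = GraphUndirectedWeighted()
    demo_graph.add_edge(1, 2, 3)
    demo_graph.add_edge(2, 3, 2)
    demo_graph.add_edge(1, 3, 10)
    demo_dist, demo_parent = prims_algo(demo_graph)
    print(demo_dist, demo_parent)  # demo_parent[3] == 2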
| 354
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass


def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
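# To exercise this file locally (a sketch; the path assumes the usual transformers
# repository layout — the integration tests are @slow and need RUN_SLOW=1):
#
#   pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py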
| 92
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
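# Usage sketch (illustrative; the checkpoint name is an assumption): thanks to
# _LazyModule, the torch-backed classes are only imported on first attribute access.
#
#   from transformers import BioGptForCausalLM, BioGptTokenizer
#   tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")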
| 329
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
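# Usage sketch (illustrative, not part of the original file): the default config and
# the dynamic ONNX input axes defined above.
#
#   config = Data2VecTextConfig()                 # vocab_size=30522, hidden_size=768, ...
#   onnx_config = Data2VecTextOnnxConfig(config)  # task defaults to "default"
#   print(onnx_config.inputs)                     # input_ids / attention_mask axes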
| 329
| 1
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 276
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 1
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    # arc length = circumference * (angle / 360), with the angle in degrees
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
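# A guarded sanity check (assumes arc_length above): a 360-degree "arc" is the
# full circumference 2*pi*r, and 90 degrees is a quarter of it.
if __name__ == "__main__":
    assert abs(arc_length(360, 10) - 2 * pi * 10) < 1e-9
    assert abs(arc_length(90, 10) - arc_length(360, 10) / 4) < 1e-9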
| 62
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62
| 1
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    '''simple docstring'''
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    '''simple docstring'''
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        # if an edge can still be relaxed after vertex_count - 1 rounds,
        # a negative-weight cycle must exist
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''simple docstring'''
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    # relax every edge vertex_count - 1 times
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
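# A minimal non-interactive sketch using the functions above (graph shape taken
# from the code: a list of {"src", "dst", "weight"} dicts). One negative edge,
# no negative cycle, so bellman_ford succeeds.
# demo_graph = [
#     {"src": 0, "dst": 1, "weight": 4},
#     {"src": 0, "dst": 2, "weight": 5},
#     {"src": 1, "dst": 2, "weight": -2},
# ]
# print(bellman_ford(demo_graph, 3, 3, 0))  # expected: [0.0, 4.0, 2.0]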
| 106
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 106
| 1
|
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit('.', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params', {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='cpu')
        global_step = pl_sd['global_step']
        print(F"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']
    return model, global_step
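# A hedged usage sketch for the helpers above (paths are placeholders; the
# taming-transformers package must be installed, and x is an image batch
# tensor as expected by VQModel.encode):
# model = load_vqgan("cuda", conf_path="vqgan.yaml", ckpt_path="vqgan.ckpt")
# xrec = reconstruct_with_vqgan(x, model)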
| 198
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday) )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 118
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    '''simple docstring'''
    feature_extraction_class = None
    def test_feat_extract_to_json_string(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)
    def test_feat_extract_to_json_file(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())
    def test_feat_extract_from_and_save_pretrained(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())
    def test_init_without_params(self) -> None:
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 367
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    '''simple docstring'''
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.')
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)
    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += ' + '
            else:
                polynomial += ' - '
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + 'x'
            else:
                polynomial += str(abs(self.coefficients[i])) + 'x^' + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
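# A guarded usage sketch for the Polynomial class above (coefficients are
# stored lowest-degree first, so [1, 2, 3] means 1 + 2x + 3x^2).
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    assert p.evaluate(2) == 17  # 1 + 2*2 + 3*4
    assert p.derivative().evaluate(2) == 14  # d/dx -> 2 + 6x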
| 38
| 0
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    '''simple docstring'''
    if len(nums) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''')
    if any(i <= 0 for i in nums):
        raise ValueError('''All values must be greater than 0''')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
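# A guarded usage sketch for check_polygon above: the longest side must be
# strictly shorter than the sum of the remaining sides.
if __name__ == "__main__":
    assert check_polygon([6, 10, 5]) is True
    assert check_polygon([3, 7, 13, 2]) is False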
| 207
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92
| 0
|
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
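# A guarded sketch for sigmoid above: outputs lie in (0, 1) and sigmoid(0.0)
# is exactly 0.5.
if __name__ == "__main__":
    print(sigmoid(np.array([-1.0, 0.0, 2.0])))  # ~ [0.2689 0.5 0.8808]
    assert sigmoid(np.array([0.0]))[0] == 0.5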
| 116
|
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    '''simple docstring'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
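# A guarded usage sketch for is_sum_subset above: classic DP subset-sum
# membership checks (14 = 2 + 4 + 8; 5 is unreachable from even numbers).
if __name__ == "__main__":
    assert is_sum_subset([2, 4, 6, 8], 14) is True
    assert is_sum_subset([2, 4, 6, 8], 5) is False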
| 116
| 1
|
'''simple docstring'''
__author__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '''(5 + ((4 * 2) * (2 + 3)))'''
    # answer = 45
    print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 276
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""")
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""")
    ordered_rotations = [""""""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = '''Provide a string that I will generate its BWT transform: '''
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F"Burrows Wheeler transform for string '{s}' results "
        F"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        F"we get original string '{original_string}'"
    )
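# A non-interactive round-trip sketch using the functions above: transform,
# then invert, recovering the original string exactly.
# result = bwt_transform("^BANANA")  # {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
# assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"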
| 276
| 1
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = StableUnCLIPPipeline
UpperCamelCase_ : List[str] = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCamelCase_ : Optional[Any] = False
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = 3_2
_UpperCAmelCase : List[str] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase__ , projection_dim=lowerCAmelCase__ , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=lowerCAmelCase__ , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase : Tuple = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=lowerCAmelCase__ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase : int = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_UpperCAmelCase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase__ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase__ , layers_per_block=1 , upcast_attention=lowerCAmelCase__ , use_linear_projection=lowerCAmelCase__ , )
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase : Any = AutoencoderKL()
_UpperCAmelCase : int = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict=0 ) -> Tuple:
"""simple docstring"""
if str(lowerCAmelCase__ ).startswith("mps" ):
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCAmelCase : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase__ )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
_UpperCAmelCase : Tuple = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Tuple = pipe("anime turle" , generator=lowerCAmelCase__ , output_type="np" )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase : Tuple = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
_UpperCAmelCase : Dict = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : List[str] = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 363
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
_UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" )
if key.startswith("module.decoder" ):
_UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" )
if "norm" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
_UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" )
if "layer_norm1" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" )
if "layer_norm2" in key:
_UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )]
_UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" )
if "attn.q" in key:
_UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" )
if "attn.proj" in key:
_UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" )
if "attn" in key:
_UpperCAmelCase : Dict = key.replace("attn", "attention.self" )
if "fc1" in key:
_UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" )
if "fc2" in key:
_UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" )
if "linear_pred" in key:
_UpperCAmelCase : Any = key.replace("linear_pred", "classifier" )
if "linear_fuse" in key:
_UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" )
_UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )]
_UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" )
if "bot_conv" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" )
if "skip_conv1" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" )
if "skip_conv2" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" )
if "fusion1" in key:
_UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" )
if "fusion2" in key:
_UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" )
if "fusion3" in key:
_UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" )
if "fusion" in key and "conv" in key:
_UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
_UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" )
_UpperCAmelCase : int = value
return new_state_dict
def __UpperCAmelCase ( a_: str, a_: List[Any] ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :]
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw )
return image
@torch.no_grad()
def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ):
_UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_UpperCAmelCase : Dict = GLPNImageProcessor()
# prepare image
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
_UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) )
# rename keys
_UpperCAmelCase : List[str] = rename_keys(a_ )
# key and value matrices need special treatment
read_in_k_v(a_, a_ )
# create HuggingFace model and load state dict
_UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
_UpperCAmelCase : Dict = model(a_ )
_UpperCAmelCase : List[str] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
_UpperCAmelCase : Tuple = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
_UpperCAmelCase : Dict = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, )
image_processor.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 17
| 0
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
| 106
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = F'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
| 106
| 1
|
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"""y = {y}""")
if __name__ == "__main__":
main()
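# A brief accuracy sketch (assumes method_1 and f above): the exact integral
# of x^2 over [0, 1] is 1/3, and the trapezoidal estimate tightens as the
# step count grows.
if __name__ == "__main__":
    for demo_steps in (10.0, 100.0, 1000.0):
        print(demo_steps, abs(method_1([0.0, 1.0], demo_steps) - 1 / 3))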
| 361
|
"""simple docstring"""
from __future__ import annotations
END = '#'
class Trie:
    '''simple docstring'''
    def __init__(self) -> None:
        """simple docstring"""
        self._trie: dict = {}
    def insert_word(self, text: str) -> None:
        """simple docstring"""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word(self, prefix: str) -> tuple | list:
        """simple docstring"""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)
    def _elements(self, d: dict) -> tuple:
        """simple docstring"""
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie('''de'''))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
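# One more guarded demo of the module-level trie built above; completions keep
# a trailing space because END is rendered as " " in Trie._elements.
if __name__ == "__main__":
    print(autocomplete_using_trie("dog"))  # ('dog ',)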
| 302
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : Tuple=False ) ->List[str]:
'''simple docstring'''
a : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a : int = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : int , _lowercase : List[Any]=False ) ->int:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
a : List[Any] = ""
else:
a : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a : Optional[int] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
a : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a : int = in_proj_weight[
: config.hidden_size, :
]
a : Optional[Any] = in_proj_bias[: config.hidden_size]
a : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a : Dict = in_proj_weight[
-config.hidden_size :, :
]
a : Optional[int] = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Any:
'''simple docstring'''
a : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->Tuple:
'''simple docstring'''
a : Optional[int] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : str , _lowercase : Any ) ->List[Any]:
'''simple docstring'''
a : Union[str, Any] = dct.pop(_lowercase )
a : Optional[Any] = val
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Dict ) ->List[str]:
'''simple docstring'''
a : Tuple = ViTMSNConfig()
a : List[Any] = 1000
a : str = "datasets/huggingface/label-files"
a : Optional[Any] = "imagenet-1k-id2label.json"
a : str = json.load(open(hf_hub_download(_lowercase , _lowercase ) , "r" ) )
a : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
a : Dict = idalabel
a : Union[str, Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
a : Any = 384
a : Any = 1536
a : Optional[int] = 6
elif "l16" in checkpoint_url:
a : str = 1024
a : Any = 4096
a : str = 24
a : List[str] = 16
a : Any = 0.1
elif "b4" in checkpoint_url:
a : Optional[Any] = 4
elif "l7" in checkpoint_url:
a : Dict = 7
a : int = 1024
a : List[str] = 4096
a : Optional[int] = 24
a : int = 16
a : List[Any] = 0.1
a : List[str] = ViTMSNModel(_lowercase )
a : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowercase , map_location="cpu" )["target_encoder"]
a : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowercase )
a : int = create_rename_keys(_lowercase , base_model=_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
read_in_q_k_v(_lowercase , _lowercase , base_model=_lowercase )
model.load_state_dict(_lowercase )
model.eval()
a : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
a : Union[str, Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
a : List[Any] = ViTImageProcessor(
size=config.image_size , image_mean=_lowercase , image_std=_lowercase )
a : List[Any] = image_processor(images=_lowercase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
a : List[Any] = model(**_lowercase )
a : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
a : Dict = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
a : Union[str, Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
a : Any = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
a : Optional[Any] = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
a : Tuple = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _lowercase , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 105
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : Tuple = ShapEImgaImgPipeline
snake_case__ : Optional[Any] = ["""image"""]
snake_case__ : Union[str, Any] = ["""image"""]
snake_case__ : Optional[Any] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case__ : List[str] = False
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _A ( self : Union[str, Any] ):
return 8
@property
def _A ( self : int ):
torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
UpperCamelCase :Optional[int] = CLIPVisionModel(__lowerCamelCase )
return model
@property
def _A ( self : str ):
UpperCamelCase :Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _A ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase :Dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase :int = PriorTransformer(**__lowerCamelCase )
return model
@property
def _A ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCamelCase :str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase :List[str] = ShapERenderer(**__lowerCamelCase )
return model
def _A ( self : str ):
UpperCamelCase :int = self.dummy_prior
UpperCamelCase :Any = self.dummy_image_encoder
UpperCamelCase :Dict = self.dummy_image_processor
UpperCamelCase :List[Any] = self.dummy_renderer
UpperCamelCase :int = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , )
UpperCamelCase :Optional[Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _A ( self : int , __lowerCamelCase : int , __lowerCamelCase : Any=0 ):
UpperCamelCase :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :List[Any] = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Optional[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _A ( self : List[str] ):
UpperCamelCase :Dict = """cpu"""
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :int = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCamelCase :Dict = output.images[0]
UpperCamelCase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase :Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self : List[Any] ):
UpperCamelCase :str = torch_device == """cpu"""
UpperCamelCase :int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , )
def _A ( self : List[Any] ):
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :List[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Any = 1
UpperCamelCase :int = 2
UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase :str = batch_size * [inputs[key]]
UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Any ):
UpperCamelCase :Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
UpperCamelCase :Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
UpperCamelCase :Union[str, Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
UpperCamelCase :List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase :Optional[int] = pipe(
__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Return the 1-based line number of the base,exponent pair in ``data_file``
    whose value base**exponent is greatest, compared via exponent * log10(base).
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
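# Hedged aside (not part of the original solution): the ordering of a**x is
# preserved under log10, since log10(a**x) = x * log10(a), so the comparison
# above avoids computing astronomically large integers. A quick sanity check:
#
#     assert (2**11 > 3**7) == (11 * log10(2) > 7 * log10(3))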
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """
    Return the d <= ``digit`` for which 1/d has the longest recurring cycle
    in its decimal fraction part (Project Euler 26).
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
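# Hedged usage sketch (expected value taken from the Project Euler 26
# statement, not from this file):
#
#     >>> solution(1, 10)
#     7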
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight) -> list:
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func) -> tuple:
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
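# Hedged usage sketch (the sample data is illustrative, not from the original
# file): build a menu, then pick items greedily by value under a cost budget.
#
#     food = ["Burger", "Pizza", "Coca Cola", "Rice"]
#     value = [80, 100, 60, 70]
#     weight = [40, 10, 20, 70]
#     foods = build_menu(food, value, weight)
#     print(greedy(foods, 500, Things.get_value))  # -> ([...], total_value)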
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = KandinskyVaaControlnetPipeline
__lowerCamelCase : Optional[int] = ["image_embeds", "negative_image_embeds", "hint"]
__lowerCamelCase : Dict = ["image_embeds", "negative_image_embeds", "hint"]
__lowerCamelCase : List[str] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowerCamelCase : Dict = False
@property
def _lowerCAmelCase ( self ):
return 32
@property
def _lowerCAmelCase ( self ):
return 32
@property
def _lowerCAmelCase ( self ):
return self.time_input_dim
@property
def _lowerCAmelCase ( self ):
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ):
return 100
@property
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : Any = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A : List[str] = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def _lowerCAmelCase ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.dummy_unet
A : Tuple = self.dummy_movq
A : List[Any] = DDIMScheduler(
num_train_timesteps=1000, beta_schedule="""linear""", beta_start=0.0_0085, beta_end=0.012, clip_sample=lowerCamelCase__, set_alpha_to_one=lowerCamelCase__, steps_offset=1, prediction_type="""epsilon""", thresholding=lowerCamelCase__, )
A : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=0 ):
A : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
A : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create hint
A : int = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("""mps""" ):
A : Optional[Any] = torch.manual_seed(lowerCamelCase__ )
else:
A : str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
A : List[str] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _lowerCAmelCase ( self ):
A : Dict = """cpu"""
A : List[str] = self.get_dummy_components()
A : Dict = self.pipeline_class(**lowerCamelCase__ )
A : Optional[Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : int = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
A : Union[str, Any] = output.images
A : str = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ), return_dict=lowerCamelCase__, )[0]
A : Optional[int] = image[0, -3:, -3:, -1]
A : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A : Dict = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
A : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A : Optional[Any] = torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 255.0
A : List[str] = hint.permute(2, 0, 1 ).unsqueeze(0 )
A : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
A : Tuple = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""", torch_dtype=torch.floataa )
A : Union[str, Any] = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
A : Optional[Any] = """A robot, 4k photo"""
A : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
A , A : int = pipe_prior(
lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
A : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
A : int = pipeline(
image_embeds=lowerCamelCase__, negative_image_embeds=lowerCamelCase__, hint=lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=100, output_type="""np""", )
A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__, lowerCamelCase__ )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
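# Hedged usage sketch (assumes the `transformers` tools runtime is installed
# with vision extras and the checkpoint above is reachable; not part of the
# original file):
#
#     from PIL import Image
#     captioner = ImageCaptioningTool()
#     caption = captioner(Image.open("photo.jpg"))
#     print(caption)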
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.dummy_uncond_unet
__lowerCAmelCase = ScoreSdeVeScheduler()
__lowerCAmelCase = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=_A ).images
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=_A , return_dict=_A )[
0
]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "google/ncsnpp-church-256"
__lowerCAmelCase = UNetaDModel.from_pretrained(_A )
__lowerCAmelCase = ScoreSdeVeScheduler.from_pretrained(_A )
__lowerCAmelCase = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=1_0 , output_type="numpy" , generator=_A ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
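# Hedged usage sketch of the pipeline exercised above (mirrors the slow test;
# `UNet2DModel` is assumed to be what the garbled `UNetaDModel` identifier
# refers to):
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")
#     scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
#     sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#     image = sde_ve(num_inference_steps=10, output_type="numpy").images[0]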
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _lowerCAmelCase ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[Any], UpperCAmelCase__ : str ):
super().__init__()
__lowercase = model
__lowercase = 2
__lowercase = nn.Linear(self.model.config.hidden_size, self.num_labels )
def _lowercase ( self : Optional[int] ):
pass
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : str) -> str:
'''simple docstring'''
__lowercase = LongformerModel.from_pretrained(UpperCamelCase_)
__lowercase = LightningModel(UpperCamelCase_)
__lowercase = torch.load(UpperCamelCase_, map_location=torch.device("cpu"))
lightning_model.load_state_dict(ckpt["state_dict"])
# init longformer question answering model
__lowercase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_)
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCamelCase_)
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
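# Hedged CLI sketch (script name and file paths are illustrative placeholders,
# not from the original file):
#
#     python convert_longformer_qa_checkpoint.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./lightning_ckpt.ckpt \
#         --pytorch_dump_folder_path ./longformer-qa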
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
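# Hedged aside (not in the original file): odd-even transposition is a
# parallel-friendly bubble sort; each of the n phases compares disjoint
# (alternately odd- or even-indexed) pairs, so one phase can run fully in
# parallel.
#
#     >>> odd_even_transposition([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]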
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = ['''pixel_values''']
def __init__( self : List[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : Tuple , ):
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = size if size is not None else {"height": 384, "width": 384}
SCREAMING_SNAKE_CASE_: Union[str, Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = do_resize
SCREAMING_SNAKE_CASE_: Dict = size
SCREAMING_SNAKE_CASE_: int = resample
SCREAMING_SNAKE_CASE_: str = do_rescale
SCREAMING_SNAKE_CASE_: str = rescale_factor
SCREAMING_SNAKE_CASE_: Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_: List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_: Optional[int] = do_convert_rgb
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
SCREAMING_SNAKE_CASE_: int = (size["height"], size["width"])
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ):
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ):
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ : Dict , ):
SCREAMING_SNAKE_CASE_: Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_: str = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_: List[Any] = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: List[Any] = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: int = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: List[str] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: str = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: List[str] = BatchFeature(data={"pixel_values": images} , tensor_type=lowerCAmelCase__)
return encoded_outputs
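# Hedged usage sketch (the class above is an image processor using the OPENAI
# CLIP normalization stats and a 384x384 default size; `BlipImageProcessor` is
# an assumed name for the garbled class identifier):
#
#     from PIL import Image
#     processor = BlipImageProcessor()
#     batch = processor(images=Image.open("photo.jpg"), return_tensors="pt")
#     print(batch["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])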
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _A:
"""simple docstring"""
UpperCamelCase : List[Any] = LEDConfig
UpperCamelCase : Any = {}
UpperCamelCase : Optional[Any] = 'gelu'
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=False , _A=99 , _A=32 , _A=2 , _A=4 , _A=37 , _A=0.1 , _A=0.1 , _A=20 , _A=2 , _A=1 , _A=0 , _A=4 , ):
__A : Optional[int] = parent
__A : Optional[Any] = batch_size
__A : Optional[int] = seq_length
__A : str = is_training
__A : Optional[int] = use_labels
__A : Optional[int] = vocab_size
__A : Any = hidden_size
__A : List[str] = num_hidden_layers
__A : List[Any] = num_attention_heads
__A : Optional[int] = intermediate_size
__A : str = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : List[Any] = max_position_embeddings
__A : int = eos_token_id
__A : str = pad_token_id
__A : Optional[int] = bos_token_id
__A : List[str] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__A : Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__A : Optional[Any] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self ):
__A : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__A : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__A : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__A : Dict = prepare_led_inputs_dict(__lowercase , __lowercase , __lowercase )
__A : List[Any] = tf.concat(
[tf.zeros_like(__lowercase )[:, :-1], tf.ones_like(__lowercase )[:, -1:]] , axis=-1 , )
__A : Union[str, Any] = global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self , _A , _A ):
__A : List[str] = TFLEDModel(config=__lowercase ).get_decoder()
__A : str = inputs_dict['input_ids']
__A : str = input_ids[:1, :]
__A : List[Any] = inputs_dict['attention_mask'][:1, :]
__A : Any = 1
# first forward pass
__A : int = model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase )
__A , __A : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__A : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__A : int = tf.concat([input_ids, next_tokens] , axis=-1 )
__A : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__A : Tuple = model(__lowercase , attention_mask=__lowercase )[0]
__A : Optional[int] = model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__A : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__A : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
__A : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 )
def _SCREAMING_SNAKE_CASE ( a , a , a , a=None , a=None , a=None , a=None , ) -> str:
if attention_mask is None:
__A : Tuple = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__A : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__A : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__A : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class _A( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase : str = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase : Any = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase : Any = True
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Any = False
def UpperCAmelCase_ ( self ):
__A : Optional[int] = TFLEDModelTester(self )
__A : Dict = ConfigTester(self , config_class=__lowercase )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = tf.zeros_like(inputs_dict['attention_mask'] )
__A : int = 2
__A : Any = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
__A : int = True
__A : Union[str, Any] = self.model_tester.seq_length
__A : Optional[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_A ):
__A : Tuple = outputs.decoder_attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_A ):
__A : Tuple = [t.numpy() for t in outputs.encoder_attentions]
__A : int = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__A : Union[str, Any] = True
__A : Any = False
__A : Tuple = False
__A : List[Any] = model_class(__lowercase )
__A : Union[str, Any] = model(self._prepare_for_class(__lowercase , __lowercase ) )
__A : Any = len(__lowercase )
self.assertEqual(config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
if self.is_encoder_decoder:
__A : Optional[Any] = model_class(__lowercase )
__A : List[str] = model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(config.output_hidden_states , __lowercase )
check_decoder_attentions_output(__lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : Any = True
__A : Any = model_class(__lowercase )
__A : Optional[Any] = model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
# Check attention is always last and order is fine
__A : List[Any] = True
__A : Union[str, Any] = True
__A : Union[str, Any] = model_class(__lowercase )
__A : str = model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowercase ) )
self.assertEqual(model.config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
pass
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]:
return tf.constant(_SCREAMING_SNAKE_CASE , dtype=tf.intaa )
UpperCAmelCase : List[Any] = 1E-4
@slow
@require_tf
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : List[str] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
__A : Tuple = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__A : Tuple = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__A : Dict = prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
__A : str = model(**__lowercase )[0]
__A : str = (1, 1024, 768)
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__A : int = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1e-3 )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
__A : Tuple = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__A : List[str] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__A : List[Any] = prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
__A : Dict = model(**__lowercase )[0]
__A : List[str] = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__A : Optional[int] = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1e-3 , rtol=1e-3 )
class Node:
    # Binary search tree node
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to res
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from arr, then read it back in sorted order
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
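# Hedged aside (not in the original file): tree sort inserts each element into
# an unbalanced BST and reads it back in order, so it averages O(n log n) but
# degrades to O(n^2) on already-sorted input.
#
#     >>> tree_sort([10, 1, 3, 2, 9, 14, 13])
#     [1, 2, 3, 9, 10, 13, 14]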
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Any = logging.get_logger(__name__)
lowercase : Optional[Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = 'wavlm'
def __init__( self :List[str] , a :Optional[Any]=3_2 , a :Union[str, Any]=7_6_8 , a :str=1_2 , a :Dict=1_2 , a :int=3_0_7_2 , a :List[Any]="gelu" , a :Union[str, Any]=0.1 , a :Union[str, Any]=0.1 , a :Optional[Any]=0.1 , a :Dict=0.0 , a :int=0.1 , a :List[str]=0.1 , a :Optional[int]=0.02 , a :Optional[int]=1E-5 , a :Union[str, Any]="group" , a :Dict="gelu" , a :Dict=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , a :Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a :Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , a :str=False , a :Optional[Any]=1_2_8 , a :int=1_6 , a :Optional[int]=3_2_0 , a :Dict=8_0_0 , a :List[str]=False , a :Union[str, Any]=True , a :Optional[Any]=0.05 , a :List[str]=1_0 , a :Optional[Any]=2 , a :List[Any]=0.0 , a :Any=1_0 , a :Union[str, Any]=3_2_0 , a :List[str]=2 , a :Union[str, Any]=0.1 , a :Dict=1_0_0 , a :Any=2_5_6 , a :Optional[int]=2_5_6 , a :List[str]=0.1 , a :Union[str, Any]="mean" , a :Dict=False , a :List[Any]=False , a :List[str]=2_5_6 , a :Union[str, Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , a :Union[str, Any]=(5, 3, 3, 1, 1) , a :int=(1, 2, 3, 1, 1) , a :Dict=5_1_2 , a :Tuple=8_0 , a :Tuple=0 , a :Any=1 , a :Any=2 , a :Optional[Any]=False , a :Tuple=3 , a :List[Any]=2 , a :Optional[int]=3 , a :int=None , **a :Dict , ) -> Optional[Any]:
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
__UpperCamelCase : Dict = hidden_size
__UpperCamelCase : Union[str, Any] = feat_extract_norm
__UpperCamelCase : str = feat_extract_activation
__UpperCamelCase : List[str] = list(a )
__UpperCamelCase : Optional[int] = list(a )
__UpperCamelCase : int = list(a )
__UpperCamelCase : List[Any] = conv_bias
__UpperCamelCase : Union[str, Any] = num_buckets
__UpperCamelCase : Any = max_bucket_distance
__UpperCamelCase : str = num_conv_pos_embeddings
__UpperCamelCase : List[str] = num_conv_pos_embedding_groups
__UpperCamelCase : List[Any] = len(self.conv_dim )
__UpperCamelCase : Dict = num_hidden_layers
__UpperCamelCase : Tuple = intermediate_size
__UpperCamelCase : int = hidden_act
__UpperCamelCase : str = num_attention_heads
__UpperCamelCase : Dict = hidden_dropout
__UpperCamelCase : Union[str, Any] = attention_dropout
__UpperCamelCase : Optional[int] = activation_dropout
__UpperCamelCase : Optional[int] = feat_proj_dropout
__UpperCamelCase : str = final_dropout
__UpperCamelCase : List[Any] = layerdrop
__UpperCamelCase : List[str] = layer_norm_eps
__UpperCamelCase : Tuple = initializer_range
__UpperCamelCase : Optional[int] = num_ctc_classes
__UpperCamelCase : str = vocab_size
__UpperCamelCase : str = do_stable_layer_norm
__UpperCamelCase : Dict = use_weighted_layer_sum
__UpperCamelCase : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase : int = apply_spec_augment
__UpperCamelCase : str = mask_time_prob
__UpperCamelCase : Optional[Any] = mask_time_length
__UpperCamelCase : int = mask_time_min_masks
__UpperCamelCase : Optional[int] = mask_feature_prob
__UpperCamelCase : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
__UpperCamelCase : Any = num_codevectors_per_group
__UpperCamelCase : Union[str, Any] = num_codevector_groups
__UpperCamelCase : Union[str, Any] = contrastive_logits_temperature
__UpperCamelCase : List[Any] = num_negatives
__UpperCamelCase : List[str] = codevector_dim
__UpperCamelCase : Optional[Any] = proj_codevector_dim
__UpperCamelCase : Dict = diversity_loss_weight
# ctc loss
__UpperCamelCase : Optional[int] = ctc_loss_reduction
__UpperCamelCase : Any = ctc_zero_infinity
# adapter
__UpperCamelCase : Optional[int] = add_adapter
__UpperCamelCase : Dict = adapter_kernel_size
__UpperCamelCase : str = adapter_stride
__UpperCamelCase : List[Any] = num_adapter_layers
__UpperCamelCase : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase : Union[str, Any] = list(a )
__UpperCamelCase : Optional[int] = list(a )
__UpperCamelCase : Union[str, Any] = list(a )
__UpperCamelCase : List[str] = xvector_output_dim
@property
def _lowerCamelCase ( self :Optional[int] ) -> Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
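# Hedged usage sketch (standard transformers config pattern; `WavLMConfig` is
# an assumed name for the garbled class identifier above):
#
#     from transformers import WavLMConfig, WavLMModel
#     config = WavLMConfig()      # defaults comparable to microsoft/wavlm-base
#     model = WavLMModel(config)  # randomly initialized model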
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase : Optional[int] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = DebertaVaTokenizer
_A = DebertaVaTokenizerFast
_A = True
_A = True
def _lowerCamelCase ( self :int ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase : Any = DebertaVaTokenizer(a , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self :Optional[int] , a :List[str] ) -> List[str]:
__UpperCamelCase : Any = "this is a test"
__UpperCamelCase : Optional[int] = "this is a test"
return input_text, output_text
def _lowerCamelCase ( self :str ) -> Any:
__UpperCamelCase : Optional[Any] = "<pad>"
__UpperCamelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
__UpperCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(a ) , 3_0_0_0_1 )
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def _lowerCamelCase ( self :List[Any] ) -> str:
# fmt: off
__UpperCamelCase : int = " \tHeLLo!how \n Are yoU? "
__UpperCamelCase : Optional[int] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__UpperCamelCase : Dict = DebertaVaTokenizer(a , do_lower_case=a )
__UpperCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = DebertaVaTokenizerFast(a , do_lower_case=a )
__UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowerCamelCase ( self :str ) -> Any:
pass
def _lowerCamelCase ( self :Tuple ) -> Dict:
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : Optional[int] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : Dict = DebertaVaTokenizer(a , split_by_punct=a )
__UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(a , split_by_punct=a )
__UpperCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[Any] ) -> str:
# fmt: off
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : Any = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : Any = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Dict ) -> Any:
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : Tuple = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCamelCase : Optional[int] = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[str] ) -> Tuple:
# fmt: off
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : List[str] = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[str] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
# fmt: off
__UpperCamelCase : Optional[int] = " \tHeLLo!how \n Are yoU? "
__UpperCamelCase : str = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__UpperCamelCase : int = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Tuple = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :int ) -> Any:
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : List[Any] = self.get_rust_tokenizer()
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
__UpperCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : str = tokenizer.encode(a , add_special_tokens=a )
__UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = self.get_rust_tokenizer()
__UpperCamelCase : List[Any] = tokenizer.encode(a )
__UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
__UpperCamelCase : Optional[int] = "This is a test"
__UpperCamelCase : List[Any] = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__UpperCamelCase : Tuple = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__UpperCamelCase : Union[str, Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__UpperCamelCase : Union[str, Any] = DebertaVaTokenizer(a , keep_accents=a )
__UpperCamelCase : int = DebertaVaTokenizerFast(a , keep_accents=a )
__UpperCamelCase : Tuple = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : List[str] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : int = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__UpperCamelCase : Optional[int] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__UpperCamelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCamelCase : List[str] = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : int = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> str:
__UpperCamelCase : List[Any] = DebertaVaTokenizer(a )
__UpperCamelCase : Optional[int] = tokenizer.encode("sequence builders" )
__UpperCamelCase : Optional[int] = tokenizer.encode("multi-sequence build" )
__UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a )
__UpperCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a , a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a , )
@slow
def _lowerCamelCase ( self :Dict ) -> int:
# fmt: off
__UpperCamelCase : Dict = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 151
| 1
|
'''simple docstring'''
__lowerCAmelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
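# Note (illustrative; in the upstream docs config these constants are typically named
# INSTALL_CONTENT, notebook_first_cells and black_avoid_patterns): the install snippet
# is injected as the first cell of every generated notebook, and the mapping above
# substitutes {placeholder}-style template variables with concrete class names when
# code samples are rendered.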
| 89
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
a__ : Union[str, Any] = XLMRobertaTokenizer
a__ : Optional[int] = XLMRobertaTokenizerFast
a__ : List[str] = True
a__ : List[Any] = True
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case : Any = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = "<pad>"
snake_case : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 1_002 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
snake_case : Tuple = tempfile.mkdtemp()
snake_case : Tuple = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : List[str] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
snake_case : Dict = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : int = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Tuple = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case : Tuple = tempfile.mkdtemp()
snake_case : str = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE )
snake_case : str = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : Dict = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Dict = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
shutil.rmtree(SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case : List[str] = tempfile.mkdtemp()
snake_case : List[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : Optional[Any] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
shutil.rmtree(SCREAMING_SNAKE_CASE )
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE , f.name )
snake_case : int = XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE )
snake_case : Tuple = pickle.dumps(SCREAMING_SNAKE_CASE )
pickle.loads(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : Dict = self.get_rust_tokenizer()
snake_case : Optional[Any] = "I was born in 92000, and this is falsé."
snake_case : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case : str = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
snake_case : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = self.get_rust_tokenizer()
snake_case : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE )
snake_case : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = "Hello World!"
snake_case : Optional[Any] = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
snake_case : Dict = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) )
@slow
def lowerCamelCase_ ( self ):
        # fmt: off
"""simple docstring"""
snake_case : Any = {"input_ids": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 148
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ) -> Union[str, Any]:
__lowerCAmelCase = size if size is not None else {'height': 1_8, 'width': 1_8}
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = do_normalize
def lowercase ( self : List[str] ) -> Dict:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
a_ = ImageGPTImageProcessor if is_vision_available() else None
def lowercase ( self : Union[str, Any] ) -> str:
__lowerCAmelCase = ImageGPTImageProcessingTester(self )
@property
def lowercase ( self : Dict ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'clusters' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_normalize' ) )
def lowercase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} )
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCAmelCase = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> str:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(lowerCAmelCase_ , 'image_processor.json' )
image_processor_first.to_json_file(lowerCAmelCase_ )
__lowerCAmelCase = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict()
__lowerCAmelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict()
__lowerCAmelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def lowercase ( self : Dict ) -> Union[str, Any]:
pass
def prepare_images():
__lowerCAmelCase = load_dataset('hf-internal-testing/fixtures_image_utils', split='test' )
__lowerCAmelCase = Image.open(dataset[4]['file'] )
__lowerCAmelCase = Image.open(dataset[5]['file'] )
__lowerCAmelCase = [imagea, imagea]
return images
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
__lowerCAmelCase = prepare_images()
# test non-batched
__lowerCAmelCase = image_processing(images[0] , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
__lowerCAmelCase = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ )
# test batched
__lowerCAmelCase = image_processing(lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
__lowerCAmelCase = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
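# Background note: unlike most image processors, ImageGPT's "normalization" maps each
# pixel to the index of its nearest color cluster, so the `input_ids` checked above are
# 32 * 32 = 1024 discrete tokens per image rather than float pixel values.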
| 207
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_snake_case : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image: Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead', lowerCAmelCase_, )
if isinstance(lowerCAmelCase_, torch.Tensor ):
return image
elif isinstance(lowerCAmelCase_, PIL.Image.Image ):
__lowerCAmelCase = [image]
if isinstance(image[0], PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__lowerCAmelCase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
__lowerCAmelCase = np.concatenate(lowerCAmelCase_, axis=0 )
        __lowerCAmelCase = np.array(lowerCAmelCase_ ).astype(np.float32 ) / 255.0
__lowerCAmelCase = image.transpose(0, 3, 1, 2 )
__lowerCAmelCase = 2.0 * image - 1.0
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ )
elif isinstance(image[0], torch.Tensor ):
__lowerCAmelCase = torch.cat(lowerCAmelCase_, dim=0 )
return image
def _preprocess_mask( mask: Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(lowerCAmelCase_, torch.Tensor ):
return mask
elif isinstance(lowerCAmelCase_, PIL.Image.Image ):
__lowerCAmelCase = [mask]
if isinstance(mask[0], PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = mask[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCAmelCase = [np.array(m.convert('L' ).resize((w, h), resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
__lowerCAmelCase = np.concatenate(lowerCAmelCase_, axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ )
elif isinstance(mask[0], torch.Tensor ):
__lowerCAmelCase = torch.cat(lowerCAmelCase_, dim=0 )
return mask
class _UpperCAmelCase ( DiffusionPipeline ):
"""simple docstring"""
a_ = 42
a_ = 42
    def __init__( self , unet , scheduler ) -> Optional[int]:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 250 , eta: float = 0.0 , jump_length: int = 10 , jump_n_sample: int = 10 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
__lowerCAmelCase = image
__lowerCAmelCase = _preprocess_image(lowerCAmelCase_ )
__lowerCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCAmelCase = _preprocess_mask(lowerCAmelCase_ )
__lowerCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__lowerCAmelCase = original_image.shape
__lowerCAmelCase = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.device )
__lowerCAmelCase = eta
__lowerCAmelCase = self.scheduler.timesteps[0] + 1
__lowerCAmelCase = generator[0] if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__lowerCAmelCase = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute previous image: x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__lowerCAmelCase = self.scheduler.undo_step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = t
__lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
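# Minimal usage sketch for the pipeline above (named RePaintPipeline upstream). The
# checkpoint id and file names are illustrative assumptions, not part of this module:
#
#     from diffusers import UNet2DModel, RePaintScheduler
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-ema-celebahq-256")
#     scheduler = RePaintScheduler()
#     pipe = _UpperCAmelCase(unet=unet, scheduler=scheduler)
#     original = PIL.Image.open("celeba_hq_256.png")
#     mask = PIL.Image.open("mask_256.png")  # see _preprocess_mask for the expected format
#     output = pipe(image=original, mask_image=mask, num_inference_steps=250)
#     output.images[0].save("inpainted.png")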
| 207
| 1
|
"""simple docstring"""
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in ``n`` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F'{solution() = }')
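# Illustrative check of the helper above: str_eval("9989") == 9 * 9 * 8 * 9 == 5832.
# solution() slides a 13-digit window across N, jumping a full 13 digits ahead whenever
# the next digit is smaller than the window's head, which avoids re-scanning substrings.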
| 102
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below ``ratio``."""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
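    # Illustrative: with the default ratio of 0.1 this matches the published Project
    # Euler 58 answer of 26241 (it may take a few seconds to compute).
    print(F'{solution() = }')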
| 102
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)

    print('''f(x) = sin(10 * x)''')
    print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
while i <= 100_000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 228
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''https://openaipublic.azureedge.net/jukebox/models/'''
__UpperCAmelCase = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key( key ):
    if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
    elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
    elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
    elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
    if "prime_prior" in key:
        key = key.replace('''prime_prior''' , '''encoder''' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''' , '''.''' )
    if key.endswith('''k''' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''' , '''.codebook''' )
    if "y_emb." in key:
        return key.replace('''y_emb.''' , '''metadata_embedding.''' )
    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
    if "prime_state_ln" in key:
        return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
    if ".ln" in key:
        return key.replace('''.ln''' , '''.layer_norm''' )
    if "_ln" in key:
        return key.replace('''_ln''' , '''_layer_norm''' )
    if "prime_state_proj" in key:
        return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
    if "prime_x_out" in key:
        return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
    if "prior.x_out" in key:
        return key.replace('''x_out''' , '''fc_proj_out''' )
    if "x_emb" in key:
        return key.replace('''x_emb''' , '''embed_tokens''' )
    return key
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[F"""{key_prefix}.{key}"""]
            print(F"""{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
            r = requests.get(F"""{PREFIX}{file}""" , allow_redirects=True )
            os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=True )
            open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , '''wb''' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('''/''' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['''model''']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''' , '''bias''' )] = old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''' , '''weight''' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''' , '''.model.''' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = '''vqvae''' if i == 0 else F"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
__UpperCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
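# Illustrative invocation (assuming this script is saved as convert_jukebox.py; the
# argument values below override the argparse defaults above):
#     python convert_jukebox.py --model_name jukebox-1b-lyrics \
#         --pytorch_dump_folder_path jukebox-1b-lyrics-converted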
| 228
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
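# Usage note: because of the _LazyModule above, an import such as
#     from transformers.models.trocr import TrOCRProcessor
# resolves without eagerly importing the torch-heavy modeling code; the actual
# submodule is only loaded on first attribute access.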
| 127
|
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Pass two points to get the vector from them in the form (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Get the cross product of the two vectors AB and AC."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if a vector is equal to (0, 0, 0) up to ``accuracy`` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
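if __name__ == "__main__":
    # Quick illustrative checks (values chosen so that AC = -AB in the first case):
    assert are_collinear((4, 2, 6), (6, 5, 11), (2, -1, 1))
    assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))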
| 127
| 1
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def _lowerCamelCase( a ):
__a = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F"{test_file} instead." )
__a = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
__a = components[:-1] + [test_fn.replace(".py" , "" )]
__a = ".".join(a )
return test_module_path
def _lowerCamelCase( a ):
__a = get_module_path(a )
__a = importlib.import_module(a )
return test_module
def _lowerCamelCase( a ):
__a = []
__a = get_test_module(a )
for attr in dir(a ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(a , a ) )
# sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def _lowerCamelCase( a ):
__a = []
__a = get_test_module(a )
for attr in dir(a ):
__a = getattr(a , a )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
__a = getattr(a , "all_model_classes" , [] )
if len(a ) > 0:
test_classes.append(a )
# sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def _lowerCamelCase( a ):
__a = get_test_classes(a )
__a = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def _lowerCamelCase( a ):
__a = test_class()
if hasattr(a , "setUp" ):
test.setUp()
__a = None
if hasattr(a , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
__a = test.model_tester.__class__
return model_tester
def _lowerCamelCase( a , model_class ):
__a = get_test_classes(a )
__a = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(a )
# sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def _lowerCamelCase( a , model_class ):
    __a = get_test_classes_for_model(a , model_class )
__a = []
for test_class in test_classes:
__a = get_model_tester_from_test_class(a )
if tester_class is not None:
tester_classes.append(a )
# sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def _lowerCamelCase( a ):
__a = get_test_classes(a )
__a = {test_class: get_model_tester_from_test_class(a ) for test_class in test_classes}
return test_tester_mapping
def _lowerCamelCase( a ):
__a = get_model_classes(a )
__a = {
model_class: get_test_classes_for_model(a , a ) for model_class in model_classes
}
return model_test_mapping
def _lowerCamelCase( a ):
__a = get_model_classes(a )
__a = {
model_class: get_tester_classes_for_model(a , a ) for model_class in model_classes
}
return model_to_tester_mapping
def to_json( o ):
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
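# Illustrative usage (the test-file path is an assumption; upstream these helpers are
# named get_*, e.g. get_model_to_tester_mapping):
#     bert_test = "tests/models/bert/test_modeling_bert.py"
#     print(to_json(get_model_to_tester_mapping(bert_test)))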
| 268
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    """Calculate the entropy of a pre-softmax logit tensor along dim 1."""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
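# Sanity check (illustrative): uniform logits give the maximum entropy log(n), e.g.
# entropy(torch.zeros(1, 3)) -> tensor([1.0986]) == log(3).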
class DeeBertEncoder( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
__a = config.output_attentions
__a = config.output_hidden_states
__a = nn.ModuleList([BertLayer(lowerCamelCase ) for _ in range(config.num_hidden_layers )] )
__a = nn.ModuleList([BertHighway(lowerCamelCase ) for _ in range(config.num_hidden_layers )] )
__a = [-1 for _ in range(config.num_hidden_layers )]
def a__ ( self , lowerCamelCase ):
if (type(lowerCamelCase ) is float) or (type(lowerCamelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__a = x
else:
__a = x
def a__ ( self , lowerCamelCase ):
__a = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
    def a__ ( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
__a = ()
__a = ()
__a = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = layer_module(
lowerCamelCase , lowerCamelCase , head_mask[i] , lowerCamelCase , lowerCamelCase )
__a = layer_outputs[0]
if self.output_attentions:
__a = all_attentions + (layer_outputs[1],)
__a = (hidden_states,)
if self.output_hidden_states:
__a = current_outputs + (all_hidden_states,)
if self.output_attentions:
__a = current_outputs + (all_attentions,)
__a = self.highway[i](lowerCamelCase )
# logits, pooled_output
if not self.training:
__a = highway_exit[0]
__a = entropy(lowerCamelCase )
__a = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__a = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__a = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowerCamelCase , i + 1 )
else:
__a = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = (hidden_states,)
if self.output_hidden_states:
__a = outputs + (all_hidden_states,)
if self.output_attentions:
__a = outputs + (all_attentions,)
__a = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
__a = config
__a = BertEmbeddings(lowerCamelCase )
__a = DeeBertEncoder(lowerCamelCase )
__a = BertPooler(lowerCamelCase )
self.init_weights()
def a__ ( self ):
self.encoder.init_highway_pooler(self.pooler )
def a__ ( self ):
return self.embeddings.word_embeddings
def a__ ( self , lowerCamelCase ):
__a = value
def a__ ( self , lowerCamelCase ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowerCamelCase )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def a__ ( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__a = input_ids.size()
elif inputs_embeds is not None:
__a = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__a = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__a = torch.ones(lowerCamelCase , device=lowerCamelCase )
if encoder_attention_mask is None:
__a = torch.ones(lowerCamelCase , device=lowerCamelCase )
if token_type_ids is None:
__a = torch.zeros(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__a = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__a = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__a = encoder_attention_mask[:, None, None, :]
__a = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__a = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__a = self.get_head_mask(lowerCamelCase , self.config.num_hidden_layers )
__a = self.embeddings(
input_ids=lowerCamelCase , position_ids=lowerCamelCase , token_type_ids=lowerCamelCase , inputs_embeds=lowerCamelCase )
__a = self.encoder(
lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = encoder_outputs[0]
__a = self.pooler(lowerCamelCase )
__a = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    def __init__( self , message , exit_layer ):
__a = message
__a = exit_layer # start from 1!
class BertHighway( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
__a = BertPooler(lowerCamelCase )
__a = nn.Dropout(config.hidden_dropout_prob )
__a = nn.Linear(config.hidden_size , config.num_labels )
def a__ ( self , lowerCamelCase ):
# Pooler
__a = encoder_outputs[0]
__a = self.pooler(lowerCamelCase )
# "return" pooler_output
# BertModel
__a = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__a = bmodel_output[1]
__a = self.dropout(lowerCamelCase )
__a = self.classifier(lowerCamelCase )
return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
__a = config.num_labels
__a = config.num_hidden_layers
__a = DeeBertModel(lowerCamelCase )
__a = nn.Dropout(config.hidden_dropout_prob )
__a = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def a__ ( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
__a = self.num_layers
try:
__a = self.bert(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , position_ids=lowerCamelCase , head_mask=lowerCamelCase , inputs_embeds=lowerCamelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__a = outputs[1]
__a = self.dropout(lowerCamelCase )
__a = self.classifier(lowerCamelCase )
__a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__a = e.message
__a = e.exit_layer
__a = outputs[0]
if not self.training:
__a = entropy(lowerCamelCase )
__a = []
__a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__a = []
for highway_exit in outputs[-1]:
__a = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__a = CrossEntropyLoss()
__a = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowerCamelCase )
if train_highway:
__a = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__a = (loss,) + outputs
if not self.training:
__a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 268
| 1
|
'''simple docstring'''
def counting_sort( collection ):
    """Sort a collection of integers with counting sort and return a new list."""
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string( string ):
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
lowercase__ = input("Enter numbers separated by a comma:\n").strip()
lowercase__ = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
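# Illustrative: counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]; the algorithm runs in
# O(n + k) time, where k == max(collection) - min(collection) + 1.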
| 151
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XGLMConfig
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : List[str] = """gelu"""
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ) -> str:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : List[str] = seq_length
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : int = use_labels
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : Optional[int] = d_model
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : List[str] = ffn_dim
UpperCAmelCase : Optional[int] = activation_function
UpperCAmelCase : Optional[Any] = activation_dropout
UpperCAmelCase : Dict = attention_dropout
UpperCAmelCase : List[str] = max_position_embeddings
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : str = 0
UpperCAmelCase : List[Any] = 2
UpperCAmelCase : Optional[Any] = 1
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def UpperCAmelCase_ ( self : Dict ) -> int:
UpperCAmelCase : Any = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = self.get_config()
UpperCAmelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowercase_ , )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase : List[str] = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    # Sum the decimal digits of num!
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
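    # Hedged sanity check (illustrative, added alongside the fix above): for
    # [1, 2, 4, 7], picking the non-adjacent 2 and 7 yields the best sum.
    print(maximum_non_adjacent_sum([1, 2, 4, 7]))  # 9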
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
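# A minimal usage sketch (not part of the original module; `AutoTokenizer`, the task
# string, and the constructor arguments are assumptions made for illustration):
#
#   from transformers import AutoTokenizer
#   config = MarianConfig()
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   print(dummy.keys())  # input_ids, attention_mask, decoder_input_ids, ...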
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
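# A minimal usage sketch (not part of the original module; the checkpoint name and
# image path are assumptions made for illustration):
#
#   from PIL import Image
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   print(inputs.keys())  # pixel_values plus the tokenizer outputs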
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
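    # Hedged usage sketch (function names as fixed above; the sample count is an
    # arbitrary illustration, not from the original file):
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)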
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
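    # Hedged sanity check (illustrative): for 2 bits the Gray code sequence is
    # 00, 01, 11, 10, i.e. [0, 1, 3, 2] as integers.
    print(gray_code(2))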
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
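# Hedged CLI sketch (argument values are placeholders, not from the original script):
#   python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.pt \
#     --model_dump_path ./converted_model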
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
"""simple docstring"""
from torch import nn
def get_activation(act_fn):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
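

# Hedged usage sketch (assumes the `get_activation` name fixed above):
#   act = get_activation("gelu")   # -> nn.GELU()
#   act = get_activation("swish")  # -> nn.SiLU()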
"""simple docstring"""
deps = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
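# Hedged usage sketch (assumes the `deps` name fixed above): entries are pip
# requirement strings keyed by package name, e.g.:
#   print(deps["torch"])  # "torch>=1.4"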
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )