"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = {}
__UpperCAmelCase = job['''started_at''']
__UpperCAmelCase = job['''completed_at''']
__UpperCAmelCase = date_parser.parse(snake_case_ )
__UpperCAmelCase = date_parser.parse(snake_case_ )
__UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
__UpperCAmelCase = start
__UpperCAmelCase = end
__UpperCAmelCase = duration_in_min
return job_info
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any]=None ):
__UpperCAmelCase = None
if token is not None:
__UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
__UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__UpperCAmelCase = requests.get(snake_case_ , headers=snake_case_ ).json()
__UpperCAmelCase = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} )
__UpperCAmelCase = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(snake_case_ ):
__UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=snake_case_ ).json()
job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
_lowercase : Tuple = parser.parse_args()
_lowercase : List[str] = get_job_time(args.workflow_run_id)
_lowercase : List[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(snake_case_ ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
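# A minimal round-trip sketch of the reader/writer pair exercised above (the file path is
# illustrative, and `cache_dir` is omitted only for brevity):
#
#   from datasets import Dataset
#   from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#   ds_roundtrip = JsonDatasetReader("out.jsonl").read()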
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and computes the time mask indices for pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch


class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that decays the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
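# A sketch of one way to launch this script (the local filename, dataset id, and the
# checkpoint placeholder are assumptions; TrainingArguments contributes flags such as
# --output_dir and --do_train):
#
#   python run_pretrain.py \
#       --model_name_or_path <wav2vec2 checkpoint with do_stable_layer_norm=True> \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train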
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
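# A minimal usage sketch (the checkpoint id and custom-pipeline name are assumptions,
# not confirmed by this file):
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "google/ddpm-ema-celebahq-256", custom_pipeline="ddim_noise_comparative_analysis"
#   )
#   image, latent_timestep = pipe(image=init_image, strength=0.5, return_dict=False)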
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> list[int]:
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
SCREAMING_SNAKE_CASE__ : Any = [True] * (num + 1)
SCREAMING_SNAKE_CASE__ : str = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : str = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a :Dict = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
a :Any = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a :str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original model's weights to our Table Transformer structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
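# Example invocation (the script filename below is an assumption, not taken from this file;
# the default --checkpoint_url already points at the detection checkpoint):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection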
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""Build a TensorRT engine from an ONNX question-answering model and evaluate it on SQuAD-style data."""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F''' Num examples = {len(eval_dataset)}''')
logger.info(F''' Batch size = {args.per_device_eval_batch_size}''')
total_time = 0.0
niter = 0
start_time = timeit.default_timer()
all_preds = None
for step, batch in enumerate(eval_dataloader):
    outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
    total_time += infer_time
    niter += 1
    start_logits, end_logits = outputs
    start_logits = torch.tensor(start_logits)
    end_logits = torch.tensor(end_logits)
    # necessary to pad predictions and labels for being gathered
    start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
    end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
    logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
    all_preds = nested_truncate(all_preds, len(eval_dataset))
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_000))
logger.info("Total Number of Inference = %d", niter)
prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'''Evaluation metrics: {eval_metric}''')
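# A hypothetical invocation of this evaluation script; every flag below is defined by the
# argparse block above, but the script filename, paths, tokenizer and dataset names are
# illustrative assumptions rather than values mandated by the script itself:
#
#   python evaluate_qa_trt.py \
#       --onnx_model_path ./bert-qa.onnx \
#       --output_dir ./predictions \
#       --tokenizer_name bert-base-uncased \
#       --dataset_name squad \
#       --per_device_eval_batch_size 8 \
#       --fp16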
| 133
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
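# Illustrative behavior check for rename_key (the key below is a made-up example): the regex
# r"\w+[.]\d+" matches "<name>.<digits>" segments, which are rewritten with underscores, so
# PyTorch-style list indices become Flax-style module names:
#
#   rename_key("down_blocks.0.attentions.1.weight")  # -> "down_blocks_0.attentions_1.weight"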
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
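# Minimal usage sketch, assuming a Flax model instance and a loaded PyTorch state dict
# (the checkpoint path and `flax_model` are illustrative, not defined in this file):
#
#   pt_state_dict = torch.load("pytorch_model.bin", map_location="cpu")  # hypothetical checkpoint
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#   # `flax_params` is the nested parameter dict expected by the Flax model.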
| 280
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
a_ : str = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = ["""DPTFeatureExtractor"""]
a_ : Tuple = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
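# With this registration in place, importing a symbol only loads its submodule on first
# attribute access; a minimal sketch of the intended usage (class names as listed in
# _import_structure above):
#
#   from transformers import DPTConfig, DPTModel  # resolved lazily via _LazyModule
#   model = DPTModel(DPTConfig())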
| 714
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=7 , UpperCamelCase=3 , UpperCamelCase=30 , UpperCamelCase=400 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=1 / 255 , UpperCamelCase=True , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=True , ):
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
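# Worked example of the shortest-edge logic above (numbers are illustrative): with
# size = {"shortest_edge": 18}, a 30x40 input is resized so the short side becomes 18 and
# the long side scales proportionally to int(18 * 40 / 30) = 24, preserving aspect ratio.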
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(UpperCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase , "size" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_pad" ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image and target
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"image_id": 3_9769, "annotations": target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image, target and masks_path
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowerCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify masks
lowerCamelCase_ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
| 445
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
def UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
A_ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''sampling_rate''' ) )
def UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
A_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
A_ = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
A_ = feat_extract_first.to_dict()
A_ = feat_extract_second.to_dict()
A_ = dict_first.pop('''mel_filters''' )
A_ = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
A_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
A_ = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
A_ = feat_extract_first.to_dict()
A_ = feat_extract_second.to_dict()
A_ = dict_first.pop('''mel_filters''' )
A_ = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
A_ = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
A_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A_ = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
A_ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
A_ = feature_extractor(lowerCamelCase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
A_ = feature_extractor(
lowerCamelCase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=lowerCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
A_ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
A_ = np.asarray(lowerCamelCase__ )
A_ = feature_extractor(lowerCamelCase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
A_ = self._load_datasamples(1 )
A_ = TvltFeatureExtractor()
A_ = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
A_ = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase__ , atol=1e-4 ) )
| 203
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
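# Minimal sketch of instantiating the default configuration; the attribute_map defined on
# the class aliases the generic names onto the DETR-style fields:
#
#   config = TableTransformerConfig()
#   config.hidden_size          # -> 256, aliased to config.d_model
#   config.num_attention_heads  # -> 8, aliased to config.encoder_attention_heads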
| 203
| 1
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
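# A hypothetical invocation (the script filename and all paths are illustrative assumptions):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./t5-converted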
| 700
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
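# Worked example (concentrations in cm^-3 are illustrative, typical for a silicon p-n
# junction): with donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10 and T=300 K,
# V_bi = (kT/q) * ln(Nd * Na / ni^2) ~= 0.0259 * ln(4.44e13) ~= 0.81 V:
#
#   print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))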
| 379
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
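# Illustrative behavior sketch (the tokenizer name is an assumption): pairs are greedily
# concatenated with spaces until either side would exceed max_tokens, at which point the
# packed example is emitted and a new one is started:
#
#   tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
#   src, tgt = pack_examples(tok, ["a b", "c d"], ["x", "y"], max_tokens=1024)
#   # -> src == ["a b c d"], tgt == ["x y"] when both concatenations fit the budget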
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f'''packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.''')
        Path(save_path / f'''{split}.source''').open("""w""").write("""\n""".join(packed_src))
        Path(save_path / f'''{split}.target''').open("""w""").write("""\n""".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        shutil.copyfile(src_path, save_path / f'''{split}.source''')
        shutil.copyfile(tgt_path, save_path / f'''{split}.target''')
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("""--tok_name""", type=str, help="""like facebook/bart-large-cnn,t5-base, etc.""")
    parser.add_argument("""--max_seq_len""", type=int, default=128)
    parser.add_argument("""--data_dir""", type=str)
    parser.add_argument("""--save_path""", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 540
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
'''simple docstring'''
with open(a__) as metadata_file:
a_ : Any = json.load(a__)
a_ : Dict = LukeConfig(use_entity_aware_attention=a__ , **metadata["""model_config"""])
# Load in the weights from the checkpoint_path
a_ : str = torch.load(a__ , map_location="""cpu""")
# Load the entity vocab file
a_ : List[str] = load_entity_vocab(a__)
a_ : int = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""])
# Add special tokens to the token vocabulary for downstream tasks
a_ : Optional[Any] = AddedToken("""<ent>""" , lstrip=a__ , rstrip=a__)
a_ : int = AddedToken("""<ent2>""" , lstrip=a__ , rstrip=a__)
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]})
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''')
tokenizer.save_pretrained(a__)
with open(os.path.join(a__ , LukeTokenizer.vocab_files_names["""entity_vocab_file"""]) , """w""") as f:
json.dump(a__ , a__)
a_ : List[str] = LukeTokenizer.from_pretrained(a__)
# Initialize the embeddings of the special tokens
a_ : Optional[int] = state_dict["""embeddings.word_embeddings.weight"""]
a_ : List[str] = word_emb[tokenizer.convert_tokens_to_ids(["""@"""])[0]].unsqueeze(0)
a_ : List[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""#"""])[0]].unsqueeze(0)
a_ : Optional[Any] = torch.cat([word_emb, ent_emb, enta_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers):
for matrix_name in ["query.weight", "query.bias"]:
a_ : Any = f'''encoder.layer.{layer_index}.attention.self.'''
a_ : List[str] = state_dict[prefix + matrix_name]
a_ : List[Any] = state_dict[prefix + matrix_name]
a_ : Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ : str = state_dict["""entity_embeddings.entity_embeddings.weight"""]
a_ : int = entity_emb[entity_vocab["""[MASK]"""]]
a_ : int = LukeModel(config=a__).eval()
a_ , a_ : Optional[int] = model.load_state_dict(a__ , strict=a__)
if not (len(a__) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(a__)}. Expected only missing embeddings.position_ids''')
if not (all(key.startswith("""entity_predictions""") or key.startswith("""lm_head""") for key in unexpected_keys)):
raise ValueError(
"""Unexpected keys"""
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions") or key.startswith("lm_head"))])}''')
# Check outputs
a_ : List[Any] = LukeTokenizer.from_pretrained(a__ , task="""entity_classification""")
a_ : str = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
a_ : str = (3_9, 4_2)
a_ : Tuple = tokenizer(a__ , entity_spans=[span] , add_prefix_space=a__ , return_tensors="""pt""")
a_ : List[str] = model(**a__)
# Verify word hidden states
if model_size == "large":
a_ : Optional[int] = torch.Size((1, 4_2, 1_0_2_4))
a_ : List[str] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
else: # base
a_ : List[str] = torch.Size((1, 4_2, 7_6_8))
a_ : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''')
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4):
raise ValueError
# Verify entity hidden states
if model_size == "large":
a_ : Dict = torch.Size((1, 1, 1_0_2_4))
a_ : int = torch.tensor([[0.0466, -0.0106, -0.0179]])
else: # base
a_ : Optional[Any] = torch.Size((1, 1, 7_6_8))
a_ : str = torch.tensor([[0.1457, 0.1044, 0.0174]])
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''')
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a__ , atol=1e-4):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(a__))
model.save_pretrained(a__)
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : List[str] = {}
with open(a__ , """r""" , encoding="""utf-8""") as f:
for index, line in enumerate(a__):
a_ , a_ : List[Any] = line.rstrip().split("""\t""")
a_ : Any = index
return entity_vocab
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
__snake_case : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
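# Hypothetical invocation of the converter above (all paths are placeholders,
# shown only as a usage sketch):
#   python convert_luke_checkpoint.py \
#       --checkpoint_path ./luke_large/pytorch_model.bin \
#       --metadata_path ./luke_large/metadata.json \
#       --entity_vocab_path ./luke_large/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted_luke \
#       --model_size large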
| 540
| 1
|
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Dict = len(__A )
for i in range(1 , __A ):
lowerCAmelCase_ : Union[str, Any] = collection[i]
lowerCAmelCase_ : Union[str, Any] = 0
lowerCAmelCase_ : Optional[Any] = i - 1
while low <= high:
lowerCAmelCase_ : List[str] = (low + high) // 2
if val < collection[mid]:
lowerCAmelCase_ : Union[str, Any] = mid - 1
else:
lowerCAmelCase_ : int = mid + 1
for j in range(__A , __A , -1 ):
lowerCAmelCase_ : Tuple = collection[j - 1]
lowerCAmelCase_ : Union[str, Any] = val
return collection
if __name__ == "__main__":
lowercase__ : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ : int = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
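# A readable, self-contained sketch of the same binary insertion sort (the
# names val/low/high/mid are assumed from the logic above): a binary search
# finds the insertion index inside the already-sorted prefix, then the tail is
# shifted one slot to the right to make room.
def binary_insertion_sort_sketch(collection: list) -> list:
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:  # binary search over the sorted prefix collection[0:i]
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):  # shift elements right to open slot `low`
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection

assert binary_insertion_sort_sketch([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]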
| 701
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ : int = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : str = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
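# Effect of the lazy module above (a sketch, not part of the file): importing a
# config class never triggers the torch/TF imports, while model classes are
# only resolved on first attribute access.
#   from transformers import GroupViTConfig   # cheap, no torch required
#   from transformers import GroupViTModel    # pulls in torch lazily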
| 317
| 0
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCAmelCase = datasets.logging.get_logger(__name__)
UpperCAmelCase = '''\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'''
UpperCAmelCase = '''\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'''
UpperCAmelCase = '''\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'''
def __UpperCamelCase ( lowercase__ : List[Any], lowercase__ : str, lowercase__ : List[Any]=False, lowercase__ : int=False, lowercase__ : List[str]=True, lowercase__ : Union[str, Any]=False, lowercase__ : Optional[int]="dummy_doc" ):
'''simple docstring'''
__lowercase ={doc: key_lines}
__lowercase ={doc: sys_lines}
__lowercase ={}
__lowercase =0
__lowercase =0
__lowercase =0
__lowercase =0
__lowercase =0
__lowercase =0
__lowercase =reader.get_doc_mentions(UpperCamelCase__, key_doc_lines[doc], UpperCamelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase =reader.set_annotated_parse_trees(UpperCamelCase__, key_doc_lines[doc], UpperCamelCase__, UpperCamelCase__ )
__lowercase =reader.get_doc_mentions(UpperCamelCase__, sys_doc_lines[doc], UpperCamelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase =reader.set_annotated_parse_trees(UpperCamelCase__, sys_doc_lines[doc], UpperCamelCase__, UpperCamelCase__ )
if remove_nested:
__lowercase =reader.remove_nested_coref_mentions(UpperCamelCase__, UpperCamelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase =reader.remove_nested_coref_mentions(UpperCamelCase__, UpperCamelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase =reader.get_mention_assignments(UpperCamelCase__, UpperCamelCase__ )
__lowercase =reader.get_mention_assignments(UpperCamelCase__, UpperCamelCase__ )
__lowercase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
'Number of resulting singleton clusters in the key '
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
'files, respectively' )
return doc_coref_infos
def __UpperCamelCase ( lowercase__ : Union[str, Any], lowercase__ : List[str], lowercase__ : Any, lowercase__ : Optional[int], lowercase__ : Dict, lowercase__ : Optional[Any], lowercase__ : Any ):
'''simple docstring'''
__lowercase =get_coref_infos(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
__lowercase ={}
__lowercase =0
__lowercase =0
for name, metric in metrics:
__lowercase =evaluator.evaluate_documents(UpperCamelCase__, UpperCamelCase__, beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ), F'''Recall: {recall * 1_00:.2f}''', F''' Precision: {precision * 1_00:.2f}''', F''' F1: {fa * 1_00:.2f}''', )
if conll_subparts_num == 3:
__lowercase =(conll / 3) * 1_00
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({'conll_score': conll} )
return output_scores
def __UpperCamelCase ( lowercase__ : Dict ):
'''simple docstring'''
__lowercase =False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
__lowercase =line.split()[5]
if not parse_col == "-":
__lowercase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def snake_case ( self : Tuple ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def snake_case ( self : Optional[int] , __lowercase : str , __lowercase : Union[str, Any] , __lowercase : int=True , __lowercase : Optional[int]=False , __lowercase : Union[str, Any]=False , __lowercase : Optional[int]=False ):
"""simple docstring"""
__lowercase =[
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
__lowercase =util.check_gold_parse_annotation(__lowercase )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase =evaluate(
key_lines=__lowercase , sys_lines=__lowercase , metrics=__lowercase , NP_only=__lowercase , remove_nested=__lowercase , keep_singletons=__lowercase , min_span=__lowercase , )
return score
| 119
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
a_ = logging.getLogger(__name__)
def _a( UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
if os.path.exists(UpperCamelCase__ ):
if os.path.exists(os.path.join(UpperCamelCase__, '''config.json''' ) ) and os.path.isfile(
os.path.join(UpperCamelCase__, '''config.json''' ) ):
os.remove(os.path.join(UpperCamelCase__, '''config.json''' ) )
if os.path.exists(os.path.join(UpperCamelCase__, '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(UpperCamelCase__, '''pytorch_model.bin''' ) ):
os.remove(os.path.join(UpperCamelCase__, '''pytorch_model.bin''' ) )
else:
os.makedirs(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =2
if unlogit:
SCREAMING_SNAKE_CASE__ : Any =torch.pow(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =p * torch.log(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =0
return -plogp.sum(dim=-1 )
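# Sanity check for the entropy helper above (a sketch; the obfuscated helper is
# referenced as `entropy`, matching its later call sites): a uniform attention
# distribution over 4 tokens has entropy ln(4) ~= 1.3863.
#   p = torch.full((4,), 0.25)
#   entropy(p)  # -> tensor(1.3863)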
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
logger.info('''lv, h >\t''' + '''\t'''.join(f"{x + 1}" for x in range(len(UpperCamelCase__ ) ) ) )
for row in range(len(UpperCamelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + '''\t'''.join(f"{x:.5f}" for x in tensor[row].cpu().data ) )
else:
logger.info(f"layer {row + 1}:\t" + '''\t'''.join(f"{x:d}" for x in tensor[row].cpu().data ) )
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any]=True, UpperCamelCase__ : int=True, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =model.config.num_hidden_layers, model.config.num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.zeros(UpperCamelCase__, UpperCamelCase__ ).to(args.device )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.zeros(UpperCamelCase__, UpperCamelCase__ ).to(args.device )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : str =torch.ones(UpperCamelCase__, UpperCamelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCamelCase__ )
# If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Dict =0.0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0.0
for step, inputs in enumerate(tqdm(UpperCamelCase__, desc='''Iteration''', disable=args.local_rank not in [-1, 0] ) ):
SCREAMING_SNAKE_CASE__ : List[str] =tuple(t.to(args.device ) for t in inputs )
((SCREAMING_SNAKE_CASE__) , ) : List[Any] =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
SCREAMING_SNAKE_CASE__ : List[Any] =model(UpperCamelCase__, labels=UpperCamelCase__, head_mask=UpperCamelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =entropy(attn.detach(), UpperCamelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
SCREAMING_SNAKE_CASE__ : str =2
SCREAMING_SNAKE_CASE__ : str =torch.pow(torch.pow(UpperCamelCase__, UpperCamelCase__ ).sum(-1 ), 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
SCREAMING_SNAKE_CASE__ : Tuple =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(UpperCamelCase__ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(UpperCamelCase__ )
logger.info('''Head ranked by importance scores''' )
SCREAMING_SNAKE_CASE__ : Tuple =torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device )
SCREAMING_SNAKE_CASE__ : str =torch.arange(
head_importance.numel(), device=args.device )
SCREAMING_SNAKE_CASE__ : str =head_ranks.view_as(UpperCamelCase__ )
print_ad_tensor(UpperCamelCase__ )
return attn_entropy, head_importance, total_loss
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Dict, UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =compute_heads_importance(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =1 / loss # use the inverse LM loss instead of a downstream score
logger.info('''Pruning: original score: %f, threshold: %f''', UpperCamelCase__, original_score * args.masking_threshold )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.ones_like(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max(1, int(new_head_mask.numel() * args.masking_amount ) )
SCREAMING_SNAKE_CASE__ : str =original_score
while current_score >= original_score * args.masking_threshold:
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
SCREAMING_SNAKE_CASE__ : Optional[int] =float('''Inf''' )
SCREAMING_SNAKE_CASE__ : List[str] =head_importance.view(-1 ).sort()[1]
if len(UpperCamelCase__ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
SCREAMING_SNAKE_CASE__ : Optional[Any] =current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist() ) )
SCREAMING_SNAKE_CASE__ : List[str] =new_head_mask.view(-1 )
SCREAMING_SNAKE_CASE__ : Any =0.0
SCREAMING_SNAKE_CASE__ : str =new_head_mask.view_as(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =new_head_mask.clone().detach()
print_ad_tensor(UpperCamelCase__ )
# Compute metric and head importance again
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =compute_heads_importance(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__, head_mask=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percent)''', UpperCamelCase__, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 1_0_0, )
logger.info('''Final head mask''' )
print_ad_tensor(UpperCamelCase__ )
np.save(os.path.join(args.output_dir, '''head_mask.npy''' ), head_mask.detach().cpu().numpy() )
return head_mask
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =compute_heads_importance(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__, compute_importance=UpperCamelCase__, head_mask=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =1 / loss
SCREAMING_SNAKE_CASE__ : Tuple =datetime.now() - before_time
SCREAMING_SNAKE_CASE__ : Optional[Any] =sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ : Optional[int] ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
v,
]
assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ : Optional[int] =datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =compute_heads_importance(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__, compute_importance=UpperCamelCase__, head_mask=UpperCamelCase__, actually_pruned=UpperCamelCase__, )
SCREAMING_SNAKE_CASE__ : Dict =1 / loss
SCREAMING_SNAKE_CASE__ : Union[str, Any] =datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''', UpperCamelCase__, UpperCamelCase__, pruned_num_params / original_num_params * 1_0_0, )
logger.info('''Pruning: score with masking: %f score with pruning: %f''', UpperCamelCase__, UpperCamelCase__ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percent''', original_time / new_time * 1_0_0 )
save_model(UpperCamelCase__, args.output_dir )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''', default=UpperCamelCase__, type=UpperCamelCase__, required=UpperCamelCase__, help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''', )
parser.add_argument(
'''--model_name_or_path''', default=UpperCamelCase__, type=UpperCamelCase__, required=UpperCamelCase__, help='''Path to pretrained model or model identifier from huggingface.co/models''', )
parser.add_argument(
'''--output_dir''', default=UpperCamelCase__, type=UpperCamelCase__, required=UpperCamelCase__, help='''The output directory where the model predictions and checkpoints will be written.''', )
# Other parameters
parser.add_argument(
'''--config_name''', default='''''', type=UpperCamelCase__, help='''Pretrained config name or path if not the same as model_name_or_path''', )
parser.add_argument(
'''--tokenizer_name''', default='''''', type=UpperCamelCase__, help='''Pretrained tokenizer name or path if not the same as model_name_or_path''', )
parser.add_argument(
'''--cache_dir''', default=UpperCamelCase__, type=UpperCamelCase__, help='''Where do you want to store the pre-trained models downloaded from s3''', )
parser.add_argument(
'''--data_subset''', type=UpperCamelCase__, default=-1, help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''', action='''store_true''', help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''', action='''store_true''', help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''', action='''store_true''', help='''Don\'t normalize all importance scores between 0 and 1''', )
parser.add_argument(
'''--try_masking''', action='''store_true''', help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''', default=0.9, type=UpperCamelCase__, help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''', )
parser.add_argument(
'''--masking_amount''', default=0.1, type=UpperCamelCase__, help='''Fraction of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''', default='''acc''', type=UpperCamelCase__, help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''', default=1_2_8, type=UpperCamelCase__, help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
), )
parser.add_argument('''--batch_size''', default=1, type=UpperCamelCase__, help='''Batch size.''' )
parser.add_argument('''--seed''', type=UpperCamelCase__, default=4_2 )
parser.add_argument('''--local_rank''', type=UpperCamelCase__, default=-1, help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''', action='''store_true''', help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''', type=UpperCamelCase__, default='''''', help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''', type=UpperCamelCase__, default='''''', help='''Can be used for distant debugging.''' )
SCREAMING_SNAKE_CASE__ : List[str] =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
SCREAMING_SNAKE_CASE__ : List[Any] =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.device('''cuda''', args.local_rank )
SCREAMING_SNAKE_CASE__ : Any =1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device, args.n_gpu, bool(args.local_rank != -1 ) ) )
SCREAMING_SNAKE_CASE__ : Any =GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
SCREAMING_SNAKE_CASE__ : str =nn.parallel.DistributedDataParallel(
UpperCamelCase__, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=UpperCamelCase__ )
elif args.n_gpu > 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =nn.DataParallel(UpperCamelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir, exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__, os.path.join(args.output_dir, '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''', UpperCamelCase__ )
# Prepare dataset
SCREAMING_SNAKE_CASE__ : List[str] =np.concatenate(
[
np.loadtxt(args.data_dir, dtype=np.int64 ),
] )
SCREAMING_SNAKE_CASE__ : Dict =(torch.from_numpy(UpperCamelCase__ ),)
SCREAMING_SNAKE_CASE__ : Any =TensorDataset(*UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =RandomSampler(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =DataLoader(UpperCamelCase__, sampler=UpperCamelCase__, batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
SCREAMING_SNAKE_CASE__ : Any =mask_heads(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
prune_heads(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
if __name__ == "__main__":
main()
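# Hypothetical command line for the head-masking/pruning script above
# (paths and values are placeholders, shown as a sketch only):
#   python run_gpt2_bertology.py --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1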
| 296
| 0
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Optional[Any]:
_lowerCamelCase = fname.split(os.path.sep )[-1]
return re.search(r'''^(.*)_\d+\.jpg$''' , lowercase_ ).groups()[0]
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
_lowerCamelCase = file_names
_lowerCamelCase = image_transform
_lowerCamelCase = label_to_id
def __len__( self ):
return len(self.file_names )
def __getitem__( self , lowerCamelCase__ ):
_lowerCamelCase = self.file_names[idx]
_lowerCamelCase = PIL.Image.open(lowerCamelCase__ )
_lowerCamelCase = raw_image.convert('''RGB''' )
if self.image_transform is not None:
_lowerCamelCase = self.image_transform(lowerCamelCase__ )
_lowerCamelCase = extract_label(lowerCamelCase__ )
if self.label_to_id is not None:
_lowerCamelCase = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[Any] ) -> Union[str, Any]:
# Initialize accelerator
if args.with_tracking:
_lowerCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
_lowerCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase = config['''lr''']
_lowerCamelCase = int(config['''num_epochs'''] )
_lowerCamelCase = int(config['''seed'''] )
_lowerCamelCase = int(config['''batch_size'''] )
_lowerCamelCase = config['''image_size''']
if not isinstance(lowercase_ , (list, tuple) ):
_lowerCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
_lowerCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_lowerCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_lowerCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_lowerCamelCase = os.path.split(lowercase_ )[-1].split('''.''' )[0]
accelerator.init_trackers(lowercase_ , lowercase_ )
# Grab all the image filenames
_lowerCamelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
_lowerCamelCase = [extract_label(lowercase_ ) for fname in file_names]
_lowerCamelCase = list(set(lowercase_ ) )
id_to_label.sort()
_lowerCamelCase = {lbl: i for i, lbl in enumerate(lowercase_ )}
# Set the seed before splitting the data.
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# Split our filenames between train and validation
_lowerCamelCase = np.random.permutation(len(lowercase_ ) )
_lowerCamelCase = int(0.8 * len(lowercase_ ) )
_lowerCamelCase = random_perm[:cut]
_lowerCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_lowerCamelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] )
_lowerCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# For evaluation, we use a deterministic Resize
_lowerCamelCase = Compose([Resize(lowercase_ ), ToTensor()] )
_lowerCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# Instantiate dataloaders.
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase = create_model('''resnet50d''' , pretrained=lowercase_ , num_classes=len(lowercase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_lowerCamelCase = False
for param in model.get_classifier().parameters():
_lowerCamelCase = True
# We normalize the batches of images to be a bit faster.
_lowerCamelCase = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
_lowerCamelCase = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_lowerCamelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
_lowerCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
_lowerCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_lowerCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_lowerCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_lowerCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_lowerCamelCase = os.path.splitext(lowercase_ )[0]
if "epoch" in training_difference:
_lowerCamelCase = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
_lowerCamelCase = None
else:
_lowerCamelCase = int(training_difference.replace('''step_''' , '''''' ) )
_lowerCamelCase = resume_step // len(lowercase_ )
resume_step -= starting_epoch * len(lowercase_ )
# Now we train the model
for epoch in range(lowercase_ , lowercase_ ):
model.train()
if args.with_tracking:
_lowerCamelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_lowerCamelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_lowerCamelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCamelCase = (batch['''image'''] - mean) / std
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_lowerCamelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
model.eval()
_lowerCamelCase = 0
_lowerCamelCase = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCamelCase = (batch['''image'''] - mean) / std
with torch.no_grad():
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = outputs.argmax(dim=-1 )
_lowerCamelCase , _lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''label''']) )
_lowerCamelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_lowerCamelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {1_00 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_00 * eval_metric,
'''train_loss''': total_loss.item() / len(lowercase_ ),
'''epoch''': epoch,
} , step=lowercase_ , )
if checkpointing_steps == "epoch":
_lowerCamelCase = F"""epoch_{epoch}"""
if args.output_dir is not None:
_lowerCamelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase_( ) -> str:
_lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=lowercase_ , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowercase_ , default=lowercase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=lowercase_ , default=lowercase_ , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=lowercase_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=lowercase_ , default=lowercase_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=lowercase_ , default='''logs''' , help='''Location where experiment tracking logs and relevant project information are stored.''' , )
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 2_24}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
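# Hypothetical launch for the training script above (the data path is a
# placeholder; shown as a sketch only):
#   accelerate launch cv_example.py --data_dir ./pets_images \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking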
| 705
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
| 623
| 0
|
'''simple docstring'''
def A__ ( UpperCAmelCase_ ):
if not nums: # Makes sure that the list is not empty
raise ValueError('List is empty' )
_UpperCamelCase : List[str] = sum(UpperCAmelCase_ ) / len(UpperCAmelCase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
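# Worked example (sketch): for [1, 2, 3, 4] the mean is 2.5, so the average
# absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0; the obfuscated
# function above would return 1.0 for that input.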
| 195
|
'''simple docstring'''
import os
from pathlib import Path
def A__ ( ):
from torch.utils.cpp_extension import load
_UpperCamelCase : Optional[Any] = Path(UpperCAmelCase_ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
_UpperCamelCase : Tuple = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
load(
'MultiScaleDeformableAttention' , UpperCAmelCase_ , with_cuda=UpperCAmelCase_ , extra_include_paths=[str(UpperCAmelCase_ )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
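# Note (sketch): torch.utils.cpp_extension.load JIT-compiles the listed
# vision.cpp / ms_deform_attn_* sources on first call, so the initial import of
# MultiScaleDeformableAttention requires a working CUDA toolchain and can take
# a while; subsequent calls reuse the cached build.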
| 195
| 1
|
'''simple docstring'''
def __A ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def __A ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = credit_card_number
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) - 2
for i in range(_SCREAMING_SNAKE_CASE , -1 , -2 ):
# double the value of every second digit
__SCREAMING_SNAKE_CASE : Any = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 1_0
digit += 1
__SCREAMING_SNAKE_CASE : Tuple = cc_number[:i] + str(_SCREAMING_SNAKE_CASE ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
def __A ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(_SCREAMING_SNAKE_CASE ) <= 1_6:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(_SCREAMING_SNAKE_CASE ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(_SCREAMING_SNAKE_CASE ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
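# Worked Luhn check (sketch) for 4111111111111111: doubling every second digit
# from the right turns seven of the 1s into 2s and the leading 4 into 8, so the
# digit sum is 8*1 + 7*2 + 8 = 30, and 30 % 10 == 0 -- the number validates.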
| 702
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __lowerCamelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def a_ ( self ):
__SCREAMING_SNAKE_CASE : int = load_tool("text-to-speech" )
self.tool.setup()
def a_ ( self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = self.tool("hey" )
__SCREAMING_SNAKE_CASE : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
def a_ ( self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tool("hey" )
__SCREAMING_SNAKE_CASE : Optional[int] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 564
| 0
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[str] =TypeVar('T')
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int:
return (position - 1) // 2
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int:
return (2 * position) + 1
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int:
return (2 * position) + 2
class _UpperCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
_lowerCamelCase : list[tuple[T, int]] = []
_lowerCamelCase : dict[T, int] = {}
_lowerCamelCase : int = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def a__ ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def a__ ( self , _lowercase , _lowercase ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_lowerCamelCase : List[Any] = self.elements
self.elements += 1
self._bubble_up(_lowercase )
def a__ ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_lowerCamelCase, _lowerCamelCase : List[str] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.heap[0]
self._bubble_down(_lowercase )
return elem
def a__ ( self , _lowercase , _lowercase ) -> None:
# Update the weight of the given key
_lowerCamelCase : Optional[Any] = self.position_map[elem]
_lowerCamelCase : int = (elem, weight)
if position > 0:
_lowerCamelCase : List[str] = get_parent_position(_lowercase )
_lowerCamelCase, _lowerCamelCase : Dict = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_lowercase )
else:
self._bubble_down(_lowercase )
else:
self._bubble_down(_lowercase )
def a__ ( self , _lowercase ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_lowerCamelCase : str = self.position_map[elem]
if curr_pos == 0:
return None
_lowerCamelCase : int = get_parent_position(_lowercase )
_lowerCamelCase, _lowerCamelCase : List[Any] = self.heap[curr_pos]
_lowerCamelCase, _lowerCamelCase : Optional[int] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_up(_lowercase )
return None
def a__ ( self , _lowercase ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_lowerCamelCase : List[str] = self.position_map[elem]
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.heap[curr_pos]
_lowerCamelCase : str = get_child_left_position(_lowercase )
_lowerCamelCase : Any = get_child_right_position(_lowercase )
if child_left_position < self.elements and child_right_position < self.elements:
_lowerCamelCase, _lowerCamelCase : int = self.heap[child_left_position]
_lowerCamelCase, _lowerCamelCase : Dict = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_down(_lowercase )
if child_left_position < self.elements:
_lowerCamelCase, _lowerCamelCase : Dict = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_down(_lowercase )
else:
return None
if child_right_position < self.elements:
_lowerCamelCase, _lowerCamelCase : int = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_down(_lowercase )
return None
def a__ ( self , _lowercase , _lowercase ) -> None:
# Swap the nodes at the given positions
_lowerCamelCase : List[Any] = self.heap[nodea_pos][0]
_lowerCamelCase : Tuple = self.heap[nodea_pos][0]
_lowerCamelCase, _lowerCamelCase : Dict = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_lowerCamelCase : Optional[Any] = nodea_pos
_lowerCamelCase : Optional[int] = nodea_pos
class _UpperCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
_lowerCamelCase : dict[T, dict[T, int]] = {}
_lowerCamelCase : int = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def a__ ( self , _lowercase ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_lowerCamelCase : Optional[Any] = {}
self.nodes += 1
def a__ ( self , _lowercase , _lowercase , _lowercase ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(_lowercase )
self.add_node(_lowercase )
_lowerCamelCase : Dict = weight
_lowerCamelCase : Optional[Any] = weight
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , ) ->tuple[dict[T, int], dict[T, T | None]]:
_lowerCamelCase : dict[T, int] = {node: maxsize for node in graph.connections}
_lowerCamelCase : dict[T, T | None] = {node: None for node in graph.connections}
_lowerCamelCase : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if priority_queue.is_empty():
return dist, parent
# initialization
_lowerCamelCase : str = priority_queue.extract_min()
_lowerCamelCase : Any = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowerCamelCase : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE_ , dist[neighbour] )
_lowerCamelCase : List[str] = node
# running prim's algorithm
while not priority_queue.is_empty():
_lowerCamelCase : Dict = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowerCamelCase : int = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE_ , dist[neighbour] )
_lowerCamelCase : Optional[int] = node
return dist, parent
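# Usage sketch (the graph class and Prim function above are obfuscated, so the
# names here are schematic): add three weighted edges forming a triangle and
# run the Prim routine; `parent` encodes the minimum spanning tree and `dist`
# the weight of the edge that attaches each node.
#   graph.add_edge("a", "b", 3); graph.add_edge("b", "c", 10); graph.add_edge("a", "c", 5)
#   dist, parent = prims_algo(graph)   # MST edges: a-b (3) and a-c (5)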
| 434
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a_ )
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__snake_case = Features({"""text""": Value("""string""" )} )
__snake_case = Features({} )
__snake_case = "text"
@property
def a__ ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
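# Sketch of how such a task template is typically consumed (dataset and file
# names are hypothetical; `prepare_for_task` is the older datasets API for
# applying templates like this one):
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files="corpus.txt", split="train")
#   ds = ds.prepare_for_task("language-modeling")  # maps text_column -> "text"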
| 434
| 1
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 449
|
'''simple docstring'''
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text):
        """Insert a word into the trie, marking the end with END."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        """Return all stored suffixes reachable from the given prefix."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string):
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main():
    print(autocomplete_using_trie('de'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
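    # Two quick behaviour checks (outputs assume the word tuple above);
    # completions carry a trailing space because END maps to " " in _elements().
    print(autocomplete_using_trie("dog"))  # ('dog ',)
    print(autocomplete_using_trie("cat"))  # ()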
| 449
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l à</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])

        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            for token in vocab_tokens:
                fp.write(F'''{token} {vocab_tokens[token]}\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''Tôi là VinAI Research'''
        output_text = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''Tôi là VinAI Research'''
        bpe_tokens = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
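
# Outside the unit test, the tokenizer would normally be loaded from the Hub
# (real checkpoint id; the word-segmented sentence is illustrative):
#   tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#   print(tokenizer.tokenize("Tôi là sinh_viên"))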
| 274
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
lowerCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , torchscript=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , fpaa=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
# set architectures equal to `None`
lowerCAmelCase_ = None
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowerCamelCase , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tinier_bart'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tinier_bart'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , save_to_csv=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCamelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowerCamelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowerCamelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowerCamelCase , '''env.csv''' ) , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''env.csv''' ) ).exists() )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowerCamelCase ):
self.assertTrue(hasattr(_lowerCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''current''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCamelCase , '''log.txt''' ) , log_print=_lowerCamelCase , trace_memory_line_by_line=_lowerCamelCase , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''log.txt''' ) ).exists() )
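

# A stand-alone sketch of the benchmarking API exercised above
# (PyTorchBenchmark and PyTorchBenchmarkArguments are the real transformers
# classes; the argument values are illustrative, and the snippet is kept
# commented because running it actually benchmarks a model):
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"],
#       training=False,
#       inference=True,
#       sequence_lengths=[8],
#       batch_sizes=[1],
#       multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)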
| 274
| 1
|
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """
    Count the numbers below `limit` that can be written as the sum of a prime
    square, a prime cube and a prime fourth power (Project Euler 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    # Sieve of Eratosthenes over the odd candidates.
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
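

# Sanity check from the problem statement: exactly four numbers below fifty
# (28, 33, 47 and 49) can be written as a prime square + cube + fourth power.
assert solution(50) == 4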
if __name__ == "__main__":
print(f'''{solution() = }''')
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
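
# Once lazily loaded, the exports behave like normal module attributes; a
# config-only sketch (the field value is illustrative):
#   from transformers import AutoformerConfig
#   config = AutoformerConfig(prediction_length=12)
#   print(config.model_type)  # "autoformer"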
| 593
| 0
|
def circle_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using circle sort."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
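

# A couple of illustrative checks (input values chosen arbitrarily):
assert circle_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert circle_sort([-2, 5, 0, -45]) == [-45, -2, 0, 5]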
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 615
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """
    Configuration for training model.
    """
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__snake_case : Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__snake_case : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__snake_case : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__snake_case : Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__snake_case : Optional[int] = field(
default=1_00_00 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __snake_case : Optional[float] = field(default=2e-4 , metadata={"""help""": """Learning rate for training."""} )
__snake_case : Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__snake_case : Optional[int] = field(
default=7_50 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__snake_case : Optional[int] = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__snake_case : Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__snake_case : Optional[int] = field(default=5_00_00 , metadata={"""help""": """Maximum number of training steps."""} )
__snake_case : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__snake_case : Optional[int] = field(default=10_24 , metadata={"""help""": """Sequence lengths used for training."""} )
__snake_case : Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""} )
__snake_case : Optional[int] = field(
default=10_24 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__snake_case : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__snake_case : Optional[bool] = field(default=snake_case__ , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class EvaluationArguments:
    """
    Configuration for evaluating model.
    """
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__snake_case : Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__snake_case : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__snake_case : Optional[int] = field(default=10_24 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__snake_case : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class HumanEvalArguments:
    """
    Configuration for running evaluation on HumanEval dataset.
    """
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__snake_case : Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for code evaluation."""} )
__snake_case : Optional[int] = field(
default=snake_case__ , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__snake_case : Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Sample from the language model's output distribution."""} )
__snake_case : Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__snake_case : Optional[int] = field(default=2_56 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__snake_case : Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__snake_case : Optional[float] = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__snake_case : Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__snake_case : Optional[int] = field(
default=2_00 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__snake_case : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__snake_case : Optional[str] = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__snake_case : Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__snake_case : Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class PreprocessingArguments:
    """
    Configuration for preprocessing data.
    """
__snake_case : Optional[int] = field(
default=snake_case__ , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__snake_case : Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__snake_case : Optional[str] = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__snake_case : Optional[int] = field(
default=10_00_00 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__snake_case : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__snake_case : Optional[float] = field(
default=10_00 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__snake_case : Optional[float] = field(
default=1_00 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__snake_case : Optional[float] = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__snake_case : Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__snake_case : Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__snake_case : Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__snake_case : Optional[float] = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class TokenizerTrainingArguments:
    """
    Configuration for tokenizer training.
    """
__snake_case : Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__snake_case : Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__snake_case : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__snake_case : Optional[int] = field(default=20_00_00 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__snake_case : Optional[int] = field(
        default=3_27_68 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__snake_case : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__snake_case : Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class PretokenizationArguments:
    """
    Configuration for data pretokenization.
    """
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__snake_case : Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__snake_case : Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class InitializationArguments:
    """
    Configuration for initializing new model.
    """
__snake_case : Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__snake_case : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__snake_case : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__snake_case : Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""} )
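

# A minimal sketch of how these dataclasses are consumed by the codeparrot
# scripts (HfArgumentParser is the real transformers helper; TrainingArguments
# refers to the first dataclass in this file, and the snippet is kept
# commented because the remaining field names above are still mangled):
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(TrainingArguments)
#   (training_args,) = parser.parse_args_into_dataclasses()
#   print(training_args)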
| 179
| 0
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__a = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so the next test's timing excludes download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
snake_case__ : List[str] = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
snake_case__ : Any = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
snake_case__ : str = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
snake_case__ : Optional[Any] = bash_script.replace(snake_case_ , str(snake_case_ ) )
snake_case__ : Optional[int] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
snake_case__ : Optional[int] = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
snake_case__ : Tuple = ["""finetune.py"""] + bash_script.split() + args
with patch.object(snake_case_ , """argv""" , snake_case_ ):
snake_case__ : Optional[Any] = argparse.ArgumentParser()
snake_case__ : Optional[Any] = pl.Trainer.add_argparse_args(snake_case_ )
snake_case__ : Optional[int] = SummarizationModule.add_model_specific_args(snake_case_ , os.getcwd() )
snake_case__ : Optional[int] = parser.parse_args()
snake_case__ : List[str] = main(snake_case_ )
# Check metrics
snake_case__ : int = load_json(model.metrics_save_path )
snake_case__ : List[str] = metrics["""val"""][0]
snake_case__ : Any = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , snake_case_ )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
snake_case__ : str = os.listdir(snake_case_ )
snake_case__ : Any = [x for x in contents if x.endswith(""".ckpt""" )][0]
snake_case__ : List[str] = os.path.join(args.output_dir , snake_case_ )
snake_case__ : Tuple = torch.load(snake_case_ , map_location="""cpu""" )
snake_case__ : str = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
snake_case__ : str = {os.path.basename(snake_case_ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
snake_case__ : Tuple = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
snake_case__ : Dict = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
snake_case__ : List[str] = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
snake_case__ : int = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
snake_case__ : str = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
snake_case__ : Union[str, Any] = bash_script.replace(snake_case_ , str(snake_case_ ) )
snake_case__ : List[str] = self.get_auto_remove_tmp_dir()
snake_case__ : Tuple = bash_script.replace("""--fp16""" , """""" )
snake_case__ : List[str] = 6
snake_case__ : Dict = (
["""distillation.py"""]
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
"""--gpus=1""",
"""--learning_rate=1e-3""",
f"--num_train_epochs={epochs}",
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(snake_case_ , """argv""" , snake_case_ ):
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
snake_case__ : int = pl.Trainer.add_argparse_args(snake_case_ )
snake_case__ : Optional[int] = SummarizationDistiller.add_model_specific_args(snake_case_ , os.getcwd() )
snake_case__ : List[str] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
snake_case__ : Dict = distill_main(snake_case_ )
# Check metrics
snake_case__ : Any = load_json(model.metrics_save_path )
snake_case__ : str = metrics["""val"""][0]
snake_case__ : Tuple = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , snake_case_ )
# check lightning ckpt can be loaded and has a reasonable statedict
snake_case__ : Dict = os.listdir(snake_case_ )
snake_case__ : Optional[int] = [x for x in contents if x.endswith(""".ckpt""" )][0]
snake_case__ : List[Any] = os.path.join(args.output_dir , snake_case_ )
snake_case__ : List[Any] = torch.load(snake_case_ , map_location="""cpu""" )
snake_case__ : Optional[Any] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
snake_case__ : int = {os.path.basename(snake_case_ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 301
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv so the spawned processes see the training script's args.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


if __name__ == "__main__":
    main()
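
# Typical invocation (this mirrors the real transformers xla_spawn.py helper;
# the GLUE script and its flags below are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...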
| 301
| 1
|
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_SCREAMING_SNAKE_CASE )[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.',
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(
    None, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')')
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            F"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
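

# A usage sketch for the loader above (the directory name is arbitrary; the
# first call downloads the four MNIST archives from DEFAULT_SOURCE_URL):
if __name__ == "__main__":
    data = read_data_sets("/tmp/mnist_data", one_hot=True)
    images, labels = data.train.next_batch(100)
    print(images.shape, labels.shape)  # (100, 784) (100, 10)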
| 93
|
"""simple docstring"""
from __future__ import annotations
import math
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__A = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def __A (_SCREAMING_SNAKE_CASE ) ->list[int]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
lowerCAmelCase__ :Tuple = []
for num in range(len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ :int = 0
while 2 * i * i <= odd_composites[num]:
lowerCAmelCase__ :int = odd_composites[num] - 2 * i * i
if is_prime(_SCREAMING_SNAKE_CASE ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(_SCREAMING_SNAKE_CASE ) == n:
return list_nums
return []
def __A () ->int:
"""simple docstring"""
return compute_nums(1 )[0]
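

# The well-known answer to this problem; cheap enough to check inline since
# compute_nums(1) stops at the first counterexample.
assert solution() == 5777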
if __name__ == "__main__":
print(F'''{solution() = }''')
| 93
| 1
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
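

# Direct use of the helper under test (calculate_rouge comes from the seq2seq
# examples' utils; the strings below are illustrative):
#   scores = calculate_rouge(["hello there general kenobi"], ["hello there"])
#   print(scores["rouge1"])  # aggregated f-measure as a float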
| 356
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
A : Any = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
A : Dict = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def a__ ( ):
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , bootstrap_aggregation=__UpperCamelCase , rouge_keys=["rouge2", "rougeL"] )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , bootstrap_aggregation=__UpperCamelCase , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def a__ ( ):
SCREAMING_SNAKE_CASE_ = "rougeLsum"
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , newline_sep=__UpperCamelCase , rouge_keys=[k] )[k]
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , newline_sep=__UpperCamelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def a__ ( ):
SCREAMING_SNAKE_CASE_ = ["rouge1", "rouge2", "rougeL"]
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , newline_sep=__UpperCamelCase , rouge_keys=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , newline_sep=__UpperCamelCase , rouge_keys=__UpperCamelCase )
assert score_sep == score_no_sep
def a__ ( ):
SCREAMING_SNAKE_CASE_ = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
SCREAMING_SNAKE_CASE_ = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(__UpperCamelCase , __UpperCamelCase , newline_sep=__UpperCamelCase ) == calculate_rouge(__UpperCamelCase , __UpperCamelCase , newline_sep=__UpperCamelCase )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
SCREAMING_SNAKE_CASE_ = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , rouge_keys=["rougeLsum"] , newline_sep=__UpperCamelCase )["rougeLsum"]
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, defaultdict)
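For reference, a minimal invocation of the helper under test (a sketch that assumes `calculate_rouge` and the other imports from the top of this test file):

# Usage sketch: calculate_rouge returns aggregated scores as a dict by default.
demo = calculate_rouge(
    ["the cat sat on the mat"],
    ["a cat was sitting on the mat"],
    rouge_keys=["rouge1", "rougeL"],
)
print(demo)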
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = cva.getAffineTransform(lowerCAmelCase__ ,lowerCAmelCase__ )
return cva.warpAffine(lowerCAmelCase__ ,lowerCAmelCase__ ,(rows, cols) )
if __name__ == "__main__":
# read original image
A_ = cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
A_ = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A_ , A_ = gray_img.shape
# set different points to rotate image
A_ = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
A_ = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
A_ = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
A_ = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
A_ = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A_ = plt.figure(1)
A_ = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
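The `_LazyModule` indirection above defers importing `modeling_falcon` (and its torch dependency) until an attribute is actually requested. A stripped-down sketch of the same idea, using hypothetical names rather than the transformers implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    # Minimal sketch of lazy attribute resolution; not the transformers class.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)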
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 5_12,
'distilbert-base-uncased-distilled-squad': 5_12,
'distilbert-base-cased': 5_12,
'distilbert-base-cased-distilled-squad': 5_12,
'distilbert-base-german-cased': 5_12,
'distilbert-base-multilingual-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
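A typical round trip with the fast tokenizer defined above (a sketch; the checkpoint is downloaded on first use):

# Usage sketch: encode a sentence pair and inspect the special-token layout.
from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
encoded = tokenizer("Hello world", "Second segment")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# ['[CLS]', 'hello', 'world', '[SEP]', 'second', 'segment', '[SEP]']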
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
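As a quick sanity check on the property above (a sketch assuming the reconstructed `WavLMConfig` defaults):

# 5 * 2**6 == 320 raw audio samples per encoder output frame with the default strides.
config = WavLMConfig()
assert config.inputs_to_logits_ratio == 320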
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
        max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
            )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
            )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
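The preprocessing above hinges on a flatten/unflatten trick: every example becomes four (context, ending) pairs, tokenized as one flat batch and regrouped. A self-contained toy version of that regrouping (hypothetical names, for illustration only):

from itertools import chain


def regroup(flat, num_choices=4):
    # Inverse of flattening: [a0, a1, a2, a3, b0, ...] -> [[a0..a3], [b0..b3]]
    return [flat[i : i + num_choices] for i in range(0, len(flat), num_choices)]


contexts = ["ctx1", "ctx2"]
endings = [[f"e{i}" for i in range(4)], [f"f{i}" for i in range(4)]]
first = list(chain(*[[c] * 4 for c in contexts]))
second = list(chain(*endings))
pairs = [f"{a} {b}" for a, b in zip(first, second)]
print(regroup(pairs))  # [['ctx1 e0', ..., 'ctx1 e3'], ['ctx2 f0', ..., 'ctx2 f3']]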
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral for ``num``."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the number of characters saved by rewriting each numeral minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
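A quick round trip through the two helpers:

# "XIIIIIIIII" (19 written wastefully) shortens to the minimal form "XIX".
assert parse_roman_numerals("XIIIIIIIII") == 19
assert generate_roman_numerals(19) == "XIX"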
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
__magic_name__ = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__magic_name__,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    # Detach every parameter from gradient tracking.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
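A small check that the freezing helper really detaches parameters from the optimizer's reach (names as reconstructed above):

# Usage sketch: after freezing, no parameter reports requires_grad=True.
layer = torch.nn.Linear(4, 2)
freeze_module(layer)
assert not any(p.requires_grad for p in layer.parameters())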
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb ``number_of_steps`` steps taking 1 or 2 at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
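The iteration is just the Fibonacci recurrence, so:

# 1, 2, 3, 5, 8 ways for n = 1..5.
assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]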
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
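Example run of the two mutually recursive helpers:

data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3, 4, 5]  # sorted in place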
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
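Why the product trick works: distinct multisets of primes have distinct products (unique factorization), so the size of the returned set counts the prime partitions of n. For example:

# 10 = 2+2+2+2+2 = 2+2+3+3 = 2+3+5 = 3+7 = 5+5  ->  five prime partitions.
assert len(partition(10)) == 5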
from __future__ import annotations


def all_unique(input_list: list) -> bool:
    """Return True when no element of the list repeats."""
    return len(set(input_list)) == len(input_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
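Example (hashable elements only, since the check builds a set):

assert all_unique([1, 2, 3]) is True
assert all_unique(["a", "b", "a"]) is False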
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """Summarize a long English text with a BART summarization checkpoint."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
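PipelineTool instances are callable; a hedged usage sketch (run from outside this module so the class is importable; instantiating it downloads the default checkpoint):

# Usage sketch, assuming TextSummarizationTool is importable from your install.
tool = TextSummarizationTool()
print(tool("Very long English text to condense into a few sentences ..."))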
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image against its global mean intensity."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
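The same global-mean binarization expressed with NumPy for comparison (a sketch, equivalent up to the integer mean):

import numpy as np


def mean_threshold_np(gray: "np.ndarray") -> "np.ndarray":
    # Binarize against the global mean, mirroring the pixel loop above.
    return np.where(gray > gray.mean(), 255, 0).astype(np.uint8)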
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb ``number_of_steps`` steps taking 1 or 2 at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """Compute F1 score and Exact Match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64'''),
"query": datasets.Value('''int64'''),
},
"prediction_text": datasets.Value('''string'''),
},
"references": {
"idx": {
"passage": datasets.Value('''int64'''),
"query": datasets.Value('''int64'''),
},
"answers": datasets.Sequence(datasets.Value('''string''')),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64'''),
"paragraph": datasets.Value('''int64'''),
"question": datasets.Value('''int64'''),
},
"prediction": datasets.Value('''int64'''),
},
"references": datasets.Value('''int64'''),
}
else:
return {
"predictions": datasets.Value('''int64'''),
"references": datasets.Value('''int64'''),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 485
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how often (every how many layers) the encoder gets a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how often (every how many layers) the decoder gets a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
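# Minimal sanity sketch (a toy check, not part of the original file): with the
# defaults above, a sparse layer is inserted every 12 // 3 = 4 layers on both sides.
if __name__ == "__main__":
    _cfg = SwitchTransformersConfig()
    assert _cfg.encoder_sparse_step == 4 and _cfg.decoder_sparse_step == 4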
| 485
| 1
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
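# A standalone sketch (assumption: CPU run outside the unittest harness) showing the
# tiny dummy pipeline exercised above end to end with two DDIM steps.
if __name__ == "__main__":
    torch.manual_seed(0)
    _unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    _pipe = DDIMPipeline(unet=_unet, scheduler=DDIMScheduler())
    _images = _pipe(batch_size=1, num_inference_steps=2, output_type="numpy").images
    print(_images.shape)  # (1, 32, 32, 3)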
| 707
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=__lowerCamelCase , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=__lowerCamelCase , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=__lowerCamelCase , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=__lowerCamelCase , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=__lowerCamelCase , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=__lowerCamelCase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=__lowerCamelCase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=__lowerCamelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
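# Example invocation (hypothetical file names) once the command is registered on the
# `transformers-cli` entry point:
#
#   transformers-cli run --task text-classification \
#       --input data.csv --column text --format csv \
#       --output predictions.json --overwrite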
| 590
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
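# A toy sketch (hypothetical ids; the real ids come from the SentencePiece vocab) of
# the pair layout produced by build_inputs_with_special_tokens above:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 for the first segment and 1 for the second.
def _fnet_pair_layout(ids_a, ids_b, cls_id=101, sep_id=102):
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


if __name__ == "__main__":
    assert _fnet_pair_layout([7, 8], [9]) == [101, 7, 8, 102, 9, 102]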
| 531
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 531
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding control and
    whitespace characters the BPE code would choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
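# Quick self-check of the two helpers above (runs only as a script): bytes outside the
# printable ranges are remapped past U+00FF, which is why a leading space is rendered
# as 'Ġ' in GPT-2/BART vocabulary files.
if __name__ == "__main__":
    _byte_encoder = bytes_to_unicode()
    assert _byte_encoder[ord(" ")] == "Ġ"
    assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}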
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
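# Sketch of the special-token layout built above (hypothetical ids: <s>=0, </s>=2):
# a pair (A, B) becomes <s> A </s></s> B </s>, and the token type ids stay all zero
# because BART does not use segment embeddings.
def _bart_pair_layout(ids_a, ids_b, cls_id=0, sep_id=2):
    return [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]


if __name__ == "__main__":
    assert _bart_pair_layout([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]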
| 302
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
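# Toy check (hypothetical hidden size of 4) of the fused-KV split performed above:
# rows [:hidden] become the key projection, rows [hidden:] the value projection.
if __name__ == "__main__":
    _kv = torch.randn(2 * 4, 4)
    _key_w, _value_w = _kv[:4, :], _kv[4:, :]
    assert _key_w.shape == _value_w.shape == (4, 4)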
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
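    # Hypothetical invocation of this conversion script (the script file name is
    # illustrative, not part of the repository):
    #
    #   python convert_glpn_to_pytorch.py \
    #       --checkpoint_path glpn_kitti.pth \
    #       --pytorch_dump_folder_path ./glpn-kitti \
    #       --model_name glpn-kitti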
| 302
| 1
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def __magic_name__ ( self ):
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] = None
if self.use_input_mask:
lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Optional[Any] = None
if self.use_token_type_ids:
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[str] = None
lowercase : Dict = None
lowercase : Optional[int] = None
if self.use_labels:
lowercase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : int = ids_tensor([self.batch_size] , self.num_choices )
lowercase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Tuple = MobileBertModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : Any = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
lowercase : Union[str, Any] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
lowercase : Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : List[str] = MobileBertForMaskedLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : str = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Optional[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : List[Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : List[Any] = MobileBertForPreTraining(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : str = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , next_sentence_label=lowerCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : int = MobileBertForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : Dict = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Union[str, Any] = self.num_labels
lowercase : List[Any] = MobileBertForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : List[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Any = self.num_labels
lowercase : List[str] = MobileBertForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : List[Any] = self.num_choices
lowercase : Union[str, Any] = MobileBertForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : str = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
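# A toy demonstration (hypothetical values, runs only as a script) of the ratio-based
# comparison described above: a 0.05% relative error passes the 1 ± TOLERANCE band
# even when magnitudes span many orders.
if __name__ == "__main__":
    _expected = torch.tensor([1.0e8, -2.0, 3.0e-4])
    _actual = _expected * (1 + 5e-4)
    _ratio = _expected / _actual
    assert bool(torch.all(_ratio >= 1 - TOLERANCE) and torch.all(_ratio <= 1 + TOLERANCE))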
| 361
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 332
| 0
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = """The Nymphenburg Palace is a beautiful palace in Munich!"""


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
"""simple docstring"""
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A__ = BERTEncoder(
attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=UpperCAmelCase_, output_all_encodings=UpperCAmelCase_, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu" ), layer_norm_eps=predefined_args.get("layer_norm_eps", UpperCAmelCase_ ), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A__ = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
A__ = os.path.join(get_home_dir(), "models" )
A__ = _load_vocab(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, cls=UpperCAmelCase_ )
A__ = nlp.model.BERTModel(
UpperCAmelCase_, len(UpperCAmelCase_ ), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=UpperCAmelCase_, use_token_type_embed=UpperCAmelCase_, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=UpperCAmelCase_, use_decoder=UpperCAmelCase_, )
original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
A__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
A__ = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(UpperCAmelCase_ ),
}
A__ = BertConfig.from_dict(UpperCAmelCase_ )
A__ = BertForMaskedLM(UpperCAmelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight" )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight" )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta" )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
A__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
A__ = hf_bort_model.bert.encoder.layer[i]
# self attention
A__ = layer.attention.self
A__ = check_and_map_params(
self_attn.key.bias.data, F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
A__ = check_and_map_params(
self_attn.key.weight.data, F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
A__ = check_and_map_params(
self_attn.query.bias.data, F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
A__ = check_and_map_params(
self_attn.query.weight.data, F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
A__ = check_and_map_params(
self_attn.value.bias.data, F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
A__ = check_and_map_params(
self_attn.value.weight.data, F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
A__ = layer.attention.output
A__ = check_and_map_params(
self_output.dense.bias, F"""encoder.transformer_cells.{i}.proj.bias""" )
A__ = check_and_map_params(
self_output.dense.weight, F"""encoder.transformer_cells.{i}.proj.weight""" )
A__ = check_and_map_params(
self_output.LayerNorm.bias, F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
A__ = check_and_map_params(
self_output.LayerNorm.weight, F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
A__ = layer.intermediate
A__ = check_and_map_params(
intermediate.dense.bias, F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
A__ = check_and_map_params(
intermediate.dense.weight, F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
A__ = layer.output
A__ = check_and_map_params(
bert_output.dense.bias, F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
A__ = check_and_map_params(
bert_output.dense.weight, F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
A__ = check_and_map_params(
bert_output.LayerNorm.bias, F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
A__ = check_and_map_params(
bert_output.LayerNorm.weight, F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
A__ = RobertaTokenizer.from_pretrained("roberta-base" )
A__ = tokenizer.encode_plus(UpperCAmelCase_ )["input_ids"]
# Get gluon output
A__ = mx.nd.array([input_ids] )
A__ = original_bort(inputs=UpperCAmelCase_, token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path)
A__ = BertModel.from_pretrained(pytorch_dump_folder_path)
hf_bort_model.eval()
A__ = tokenizer.encode_plus(UpperCAmelCase_, return_tensors="pt" )
A__ = hf_bort_model(**UpperCAmelCase_ )[0]
A__ = output_gluon[0].asnumpy()
A__ = output_hf[0].detach().numpy()
A__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
A__ = np.allclose(UpperCAmelCase_, UpperCAmelCase_, atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:", UpperCAmelCase_ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
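# A minimal sketch of how this conversion script would be invoked (the script
# name and file paths below are illustrative assumptions, not from the source):
#
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path ./bort_4_8_768_1024.params \
#       --pytorch_dump_folder_path ./bort-pytorch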
| 709
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
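# A brief sketch of what the lazy-module pattern above buys consumers (the
# import path is illustrative): framework-specific submodules are only imported
# when one of their attributes is first accessed, so
#
#   from transformers.models.wav2vec2 import Wav2Vec2Config
#
# stays cheap for users who never touch the torch/TF/Flax model classes.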
| 562
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)
        return self.unk_token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
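# A minimal usage sketch (the local vocab path is hypothetical; any trained
# SentencePiece model would work):
#
#   tokenizer = ReformerTokenizer(vocab_file="spiece.model")
#   tokens = tokenizer._tokenize("Hello world")
#   ids = [tokenizer._convert_token_to_id(t) for t in tokens]
#   text = tokenizer.convert_tokens_to_string(tokens)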
| 113
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
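# These checks are written for pytest; assuming the repository layout implied
# by the image paths above, they could be run from the repository root with:
#
#   python -m pytest digital_image_processing/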
| 333
| 0
|
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k)."""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees with node_count nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n!."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible labeled binary trees with node_count nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
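# A quick hand-checked example of the identities above: with 3 nodes there are
# catalan_number(3) == 5 binary search tree shapes, and binary_tree_count(3) ==
# 5 * 3! == 30 labeled binary trees.
if __name__ == "__main__":
    assert catalan_number(3) == 5
    assert binary_tree_count(3) == 30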
| 451
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
__UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
__UpperCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__UpperCamelCase = self.num_queries
__UpperCamelCase = self.num_labels
__UpperCamelCase = [1, 1, 1, 1]
__UpperCamelCase = self.num_channels
__UpperCamelCase = 64
__UpperCamelCase = 128
__UpperCamelCase = self.hidden_dim
__UpperCamelCase = self.hidden_dim
__UpperCamelCase = self.hidden_dim
return config
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
    def check_output_hidden_state(self, output, config):
'''simple docstring'''
__UpperCamelCase = output.encoder_hidden_states
__UpperCamelCase = output.pixel_decoder_hidden_states
__UpperCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , config.decoder_layers )
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> List[str]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCamelCase = model(pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(
pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_snake_case = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def A__ ( self )-> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def A__ ( self )-> List[str]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def A__ ( self )-> Dict:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A__ ( self )-> str:
'''simple docstring'''
pass
def A__ ( self )-> str:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@slow
def A__ ( self )-> Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__UpperCamelCase = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = (self.model_tester.min_size,) * 2
__UpperCamelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE_ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE_ ),
'''class_labels''': torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE_ ).long(),
}
__UpperCamelCase = self.model_tester.get_config()
__UpperCamelCase = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def A__ ( self )-> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ : Any = 1e-4
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self )-> List[Any]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self )-> Any:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 384, 384) )
with torch.no_grad():
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def A__ ( self )-> List[Any]:
'''simple docstring'''
__UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 384, 384) )
with torch.no_grad():
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
__UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__UpperCamelCase = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
__UpperCamelCase = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
__UpperCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__UpperCamelCase = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def A__ ( self )-> str:
'''simple docstring'''
__UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
__UpperCamelCase = inputs['''pixel_values'''].to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['''mask_labels''']]
__UpperCamelCase = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
__UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
| 451
| 1
|
"""Finding the shortest path in a graph with 0/1 edge weights (0-1 BFS)."""
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list, restricted to edge weights of 0 or 1."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: zero-weight edges go to the front of the deque, unit-weight
        # edges to the back, so vertices are popped in nondecreasing distance.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
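# A minimal usage sketch of the 0-1 BFS above (this tiny graph is made up for
# illustration): the zero-weight edge lets vertex 2 be reached at distance 1.
if __name__ == "__main__":
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    assert g.get_shortest_path(0, 2) == 1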
| 22
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """Construct a RemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
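# A minimal usage sketch (the vocab path is hypothetical; any trained
# SentencePiece model would work):
#
#   tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#   tokens = tokenizer._tokenize("Hello world")
#   ids = [tokenizer._convert_token_to_id(t) for t in tokens]
#   ids = tokenizer.build_inputs_with_special_tokens(ids)  # adds [CLS] ... [SEP]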
| 363
| 0
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ :Dict = None
if self.use_labels:
snake_case__ :Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ :Any = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
snake_case__ :List[Any] = FocalNetModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Any = model(UpperCamelCase )
snake_case__ :Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case__ :Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
snake_case__ :List[Any] = FocalNetBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :List[str] = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
snake_case__ :Optional[int] = None
snake_case__ :Union[str, Any] = FocalNetBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Tuple = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :Any = FocalNetForMaskedImageModeling(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :str = model(UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case__ :Union[str, Any] = 1
snake_case__ :str = FocalNetForMaskedImageModeling(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ :Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
snake_case__ :List[Any] = self.type_sequence_label_size
snake_case__ :int = FocalNetForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :str = model(UpperCamelCase ,labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ :List[str] = 1
snake_case__ :Union[str, Any] = FocalNetForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ :Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :List[str] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ :str = config_and_inputs
snake_case__ :Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _A , _A , unittest.TestCase ):
_A = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_A = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Any = FocalNetModelTester(self )
snake_case__ :Any = ConfigTester(self ,config_class=UpperCamelCase ,embed_dim=37 ,has_text_modality=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self ) -> str:
return
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCAmelCase_ ( self ) -> Dict:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCAmelCase_ ( self ) -> List[Any]:
pass
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ , snake_case__ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case__ :List[Any] = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
snake_case__ :Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase ,nn.Linear ) )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case__ :Union[str, Any] = model_class(UpperCamelCase )
snake_case__ :int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ :Tuple = [*signature.parameters.keys()]
snake_case__ :List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ :Any = model(**self._prepare_for_class(UpperCamelCase ,UpperCamelCase ) )
snake_case__ :Optional[int] = outputs.hidden_states
snake_case__ :Union[str, Any] = getattr(
self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase ) ,UpperCamelCase )
# FocalNet has a different seq_length
snake_case__ :List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ :Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
snake_case__ :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase ) ,UpperCamelCase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ :Dict = reshaped_hidden_states[0].shape
snake_case__ :Any = (
reshaped_hidden_states[0].view(UpperCamelCase ,UpperCamelCase ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ :Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
snake_case__ :List[Any] = True
self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ :Any = True
self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ , snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ :List[Any] = 3
snake_case__ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case__ :Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case__ :Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
snake_case__ :Tuple = True
self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ :int = True
self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,(padded_height, padded_width) )
@slow
def lowerCAmelCase_ ( self ) -> str:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ :Tuple = FocalNetModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ :int = _config_zero_init(UpperCamelCase )
for model_class in self.all_model_classes:
snake_case__ :Dict = model_class(config=UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image ,return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( _A , unittest.TestCase ):
_A = (FocalNetBackbone,) if is_torch_available() else ()
_A = FocalNetConfig
_A = False
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Any = FocalNetModelTester(self )
| 57
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ) -> Any:
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters ) -> Any:
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
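# Illustrative usage (added; not part of the original module): map two RGB
# pixels onto a hypothetical two-color palette. color_quantize returns, for
# each pixel, the index of the nearest cluster under squared Euclidean
# distance:
#
#   palette = np.array([[0, 0, 0], [255, 255, 255]])
#   pixels = np.array([[10, 10, 10], [250, 240, 255]])
#   color_quantize(pixels, palette)  # -> array([0, 1])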
class _snake_case ( _A ):
_A = ['pixel_values']
    def __init__( self ,clusters = None ,do_resize = True ,size = None ,resample = PILImageResampling.BILINEAR ,do_normalize = True ,do_color_quantize = True ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self ,image ,size ,resample = PILImageResampling.BILINEAR ,data_format = None ,**kwargs ,) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            image ,size=(size["height"], size["width"]) ,resample=resample ,data_format=data_format ,**kwargs )
    def normalize( self ,image ,data_format = None ,) -> np.ndarray:
        image = rescale(image=image ,scale=1 / 127.5 ,data_format=data_format )
        image = image - 1
        return image
    def preprocess( self ,images ,do_resize = None ,size = None ,resample = None ,do_normalize = None ,do_color_quantize = None ,clusters = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image ,ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images ,clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size ,-1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
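# Hypothetical end-to-end sketch (names assumed, not from this file): given a
# k-means palette of shape (n_clusters, 3), the processor above resizes each
# image, rescales it to [-1, 1], quantizes every pixel to its nearest palette
# index, and returns the flattened indices as "input_ids":
#
#   processor = <the image processor class above>(clusters=palette, size={"height": 32, "width": 32})
#   encoding = processor(images=pil_image, return_tensors="np")
#   encoding["input_ids"].shape  # (1, 32 * 32)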
| 57
| 1
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowercase ( _UpperCamelCase ):
def __init__( self , snake_case , snake_case , snake_case = None , snake_case = None , snake_case = False , **snake_case , ):
super().__init__(features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ = Sql(
cache_dir=_UpperCAmelCase , features=_UpperCAmelCase , sql=_UpperCAmelCase , con=_UpperCAmelCase , **_UpperCAmelCase , )
    def read( self ):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class lowercase :
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = None , **snake_case , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
snake_case_ = dataset
snake_case_ = name
snake_case_ = con
snake_case_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
snake_case_ = num_proc
snake_case_ = to_sql_kwargs
    def write( self ):
        _ = self.to_sql_kwargs.pop('sql' , None )
        _ = self.to_sql_kwargs.pop('con' , None )
        index = self.to_sql_kwargs.pop('index' , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql( self , args ):
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write( self , index , **to_sql_kwargs ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _UpperCAmelCase , _UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
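# Minimal usage sketch (added; the writer class above corresponds to
# SqlDatasetWriter in the datasets library, assuming that name):
#
#   import sqlite3
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   con = sqlite3.connect(":memory:")
#   written = SqlDatasetWriter(ds, "my_table", con).write()  # number of rows written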
| 362
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    return choice(lst)
def kth_number(lst: list[int] , k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big , k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k)
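# Worked example (added): kth_number returns the k-th smallest element
# (1-indexed) of a list of distinct values, in expected linear time:
#
#   kth_number([3, 1, 2], 1)  # -> 1
#   kth_number([3, 1, 2], 3)  # -> 3
# The pivot is chosen at random, so the worst case degrades to O(n^2).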
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["input_features", "is_longer"]
def __init__( self : Union[str, Any] , snake_case_ : str=64 , snake_case_ : Union[str, Any]=4_80_00 , snake_case_ : Dict=4_80 , snake_case_ : List[str]=10 , snake_case_ : Any=10_24 , snake_case_ : Tuple=0.0 , snake_case_ : List[str]=False , snake_case_ : float = 0 , snake_case_ : float = 1_40_00 , snake_case_ : int = None , snake_case_ : str = "fusion" , snake_case_ : str = "repeatpad" , **snake_case_ : List[Any] , )-> List[Any]:
super().__init__(
feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
__lowerCAmelCase =top_db
__lowerCAmelCase =truncation
__lowerCAmelCase =padding
__lowerCAmelCase =fft_window_size
__lowerCAmelCase =(fft_window_size >> 1) + 1
__lowerCAmelCase =hop_length
__lowerCAmelCase =max_length_s
__lowerCAmelCase =max_length_s * sampling_rate
__lowerCAmelCase =sampling_rate
__lowerCAmelCase =frequency_min
__lowerCAmelCase =frequency_max
__lowerCAmelCase =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm=snake_case_ , mel_scale="""htk""" , )
__lowerCAmelCase =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCamelCase ( self : List[Any])-> Dict[str, Any]:
__lowerCAmelCase =copy.deepcopy(self.__dict__)
__lowerCAmelCase =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCamelCase ( self : str , snake_case_ : np.array , snake_case_ : Optional[np.array] = None)-> np.ndarray:
__lowerCAmelCase =spectrogram(
snake_case_ , window_function(self.fft_window_size , """hann""") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case_ , log_mel="""dB""" , )
return log_mel_spectrogram.T
def UpperCamelCase ( self : int , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Any)-> Optional[int]:
__lowerCAmelCase =np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
__lowerCAmelCase =[0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
__lowerCAmelCase =[0]
# randomly choose index for each part
__lowerCAmelCase =np.random.choice(ranges[0])
__lowerCAmelCase =np.random.choice(ranges[1])
__lowerCAmelCase =np.random.choice(ranges[2])
__lowerCAmelCase =mel[idx_front : idx_front + chunk_frames, :]
__lowerCAmelCase =mel[idx_middle : idx_middle + chunk_frames, :]
__lowerCAmelCase =mel[idx_back : idx_back + chunk_frames, :]
__lowerCAmelCase =torch.tensor(mel[None, None, :])
__lowerCAmelCase =torch.nn.functional.interpolate(
snake_case_ , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=snake_case_)
__lowerCAmelCase =mel_shrink[0][0].numpy()
__lowerCAmelCase =np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : np.array , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : List[Any])-> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__lowerCAmelCase =True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__lowerCAmelCase =len(snake_case_) - max_length
__lowerCAmelCase =np.random.randint(0 , overflow + 1)
__lowerCAmelCase =waveform[idx : idx + max_length]
__lowerCAmelCase =self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
__lowerCAmelCase =self._np_extract_fbank_features(snake_case_ , self.mel_filters)
__lowerCAmelCase =max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__lowerCAmelCase =mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__lowerCAmelCase =np.stack([mel, mel, mel, mel] , axis=0)
__lowerCAmelCase =False
else:
__lowerCAmelCase =self._random_mel_fusion(snake_case_ , snake_case_ , snake_case_)
__lowerCAmelCase =True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""")
else:
__lowerCAmelCase =False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__lowerCAmelCase =int(max_length / len(snake_case_))
__lowerCAmelCase =np.stack(np.tile(snake_case_ , n_repeat + 1))[:max_length]
if padding == "repeatpad":
__lowerCAmelCase =int(max_length / len(snake_case_))
__lowerCAmelCase =np.stack(np.tile(snake_case_ , snake_case_))
__lowerCAmelCase =np.pad(snake_case_ , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0)
if truncation == "fusion":
__lowerCAmelCase =self._np_extract_fbank_features(snake_case_ , self.mel_filters)
__lowerCAmelCase =np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
__lowerCAmelCase =self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , snake_case_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case_ : str = None , snake_case_ : Optional[str] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : Any , )-> BatchFeature:
__lowerCAmelCase =truncation if truncation is not None else self.truncation
__lowerCAmelCase =padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
__lowerCAmelCase =isinstance(snake_case_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""")
__lowerCAmelCase =is_batched_numpy or (
isinstance(snake_case_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__lowerCAmelCase =[np.asarray(snake_case_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray):
__lowerCAmelCase =np.asarray(snake_case_ , dtype=np.floataa)
elif isinstance(snake_case_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__lowerCAmelCase =raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__lowerCAmelCase =[np.asarray(snake_case_)]
# convert to mel spectrogram, truncate and pad if needed.
__lowerCAmelCase =[
self._get_input_mel(snake_case_ , max_length if max_length else self.nb_max_samples , snake_case_ , snake_case_)
for waveform in raw_speech
]
__lowerCAmelCase =[]
__lowerCAmelCase =[]
for mel, longer in padded_inputs:
input_mel.append(snake_case_)
is_longer.append(snake_case_)
if truncation == "fusion" and sum(snake_case_) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__lowerCAmelCase =np.random.randint(0 , len(snake_case_))
__lowerCAmelCase =True
if isinstance(input_mel[0] , snake_case_):
__lowerCAmelCase =[np.asarray(snake_case_ , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
__lowerCAmelCase =[[longer] for longer in is_longer]
__lowerCAmelCase ={"""input_features""": input_mel, """is_longer""": is_longer}
__lowerCAmelCase =BatchFeature(snake_case_)
if return_tensors is not None:
__lowerCAmelCase =input_features.convert_to_tensors(snake_case_)
return input_features
| 456
|
def solution( numerator : int = 3 , denominator : int = 7 , limit : int = 1000000 ) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
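# Worked example (added): with limit=8 the reduced fractions left of 3/7 that
# the loop considers include 1/3, 1/4, 2/5, 2/7 and 3/8; the largest is 2/5,
# so solution(3, 7, 8) returns 2. Comparing current_numerator * max_denominator
# against current_denominator * max_numerator keeps the search in integers and
# avoids floating-point error.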
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 456
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 426
|
import unittest
from knapsack import greedy_knapsack as kp
class _A ( unittest.TestCase ):
    def test_sorted( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self : Dict ) -> int:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_negative_weight_value( self : str ) -> Dict:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
    def test_negative_profit_value( self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
    def test_null_max_weight( self : Tuple ) -> Dict:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_unequal_list_length( self : List[str] ) -> List[Any]:
        """simple docstring"""
        self.assertRaisesRegex(
            ValueError , '''The length of profit and weight must be same.''' )
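    # Note on test_sorted (added): the total weight 2+4+6+8+10+12 = 42 is
    # below max_weight=100, so the greedy knapsack takes every item and the
    # expected profit is 10+20+30+40+50+60 = 210.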
if __name__ == "__main__":
unittest.main()
| 217
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Any = logging.get_logger(__name__)
set_seed(7_70)
__snake_case : Union[str, Any] = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
__snake_case : int = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
__snake_case : Optional[int] = os.path.dirname(os.path.abspath(__file__))
__snake_case : Union[str, Any] = os.path.join(os.path.expanduser("""~"""), """.cache""")
__snake_case : Any = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _get_ckpt_path( model_type : Optional[Any] , use_small : int=False ) -> Dict:
    """simple docstring"""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download( from_hf_hub : str , file_name : Optional[int] ) -> Union[str, Any]:
    """simple docstring"""
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path : Optional[Any] , device : List[Any] , use_small : Any=False , model_type : Union[str, Any]="text" ) -> Optional[Any]:
"""simple docstring"""
if model_type == "text":
lowerCAmelCase__ = BarkSemanticModel
lowerCAmelCase__ = BarkSemanticConfig
lowerCAmelCase__ = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCAmelCase__ = BarkCoarseModel
lowerCAmelCase__ = BarkCoarseConfig
lowerCAmelCase__ = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCAmelCase__ = BarkFineModel
lowerCAmelCase__ = BarkFineConfig
lowerCAmelCase__ = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCAmelCase__ = F"{model_type}_small" if use_small else model_type
lowerCAmelCase__ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(UpperCamelCase_ ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['repo_id'] , model_info['file_name'] )
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location=UpperCamelCase_ )
# this is a hack
lowerCAmelCase__ = checkpoint['model_args']
if "input_vocab_size" not in model_args:
lowerCAmelCase__ = model_args['vocab_size']
lowerCAmelCase__ = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCAmelCase__ = model_args.pop('n_head' )
lowerCAmelCase__ = model_args.pop('n_embd' )
lowerCAmelCase__ = model_args.pop('n_layer' )
lowerCAmelCase__ = ConfigClass(**checkpoint['model_args'] )
lowerCAmelCase__ = ModelClass(config=UpperCamelCase_ )
lowerCAmelCase__ = GenerationConfigClass()
lowerCAmelCase__ = model_generation_config
lowerCAmelCase__ = checkpoint['model']
# fixup checkpoint
lowerCAmelCase__ = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(UpperCamelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
lowerCAmelCase__ = k[len(UpperCamelCase_ ) :]
for old_layer_name in new_layer_name_dict:
lowerCAmelCase__ = new_k.replace(UpperCamelCase_ , new_layer_name_dict[old_layer_name] )
lowerCAmelCase__ = state_dict.pop(UpperCamelCase_ )
lowerCAmelCase__ = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCAmelCase__ = {k for k in extra_keys if not k.endswith('.attn.bias' )}
lowerCAmelCase__ = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCAmelCase__ = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(UpperCamelCase_ ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(UpperCamelCase_ ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
lowerCAmelCase__ = model.num_parameters(exclude_embeddings=UpperCamelCase_ )
lowerCAmelCase__ = checkpoint['best_val_loss'].item()
logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(UpperCamelCase_ , 3 )} loss" )
model.eval()
model.to(UpperCamelCase_ )
del checkpoint, state_dict
return model
def load_model( pytorch_dump_folder_path : Union[str, Any] , use_small : Any=False , model_type : Optional[int]="text" ) -> Dict:
    """simple docstring"""
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu' # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
if model.num_parameters(exclude_embeddings=UpperCamelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
lowerCAmelCase__ = 5
lowerCAmelCase__ = 10
if model_type in ["text", "coarse"]:
lowerCAmelCase__ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowerCAmelCase__ = bark_model(UpperCamelCase_ )[0]
lowerCAmelCase__ = model(UpperCamelCase_ )
# take last logits
lowerCAmelCase__ = output_new_model_total.logits[:, [-1], :]
else:
lowerCAmelCase__ = 3
lowerCAmelCase__ = 8
lowerCAmelCase__ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCAmelCase__ = model(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = bark_model(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
def _UpperCamelCase ( UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = BarkSemanticConfig.from_pretrained(os.path.join(UpperCamelCase_ , 'config.json' ) )
lowerCAmelCase__ = BarkCoarseConfig.from_pretrained(os.path.join(UpperCamelCase_ , 'config.json' ) )
lowerCAmelCase__ = BarkFineConfig.from_pretrained(os.path.join(UpperCamelCase_ , 'config.json' ) )
lowerCAmelCase__ = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
lowerCAmelCase__ = BarkSemanticModel.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = BarkCoarseModel.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = BarkFineModel.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = EncodecModel.from_pretrained('facebook/encodec_24khz' )
lowerCAmelCase__ = BarkConfig.from_sub_model_configs(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCAmelCase__ = BarkModel(UpperCamelCase_ )
lowerCAmelCase__ = semantic
lowerCAmelCase__ = coarseAcoustic
lowerCAmelCase__ = fineAcoustic
lowerCAmelCase__ = codec
lowerCAmelCase__ = bark_generation_config
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
bark.save_pretrained(UpperCamelCase_ , repo_id=UpperCamelCase_ , push_to_hub=UpperCamelCase_ )
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
__snake_case : str = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
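# Example invocation (hypothetical script name and output path):
#   python convert_suno_to_hf.py text ./bark-text-hf --is_small
# downloads the small "text" checkpoint into the cache if needed and writes
# the converted Hugging Face model to ./bark-text-hf.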
| 711
|
from typing import Dict, Optional
import numpy as np
import datasets
__snake_case : str = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
__snake_case : Tuple = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
__snake_case : Any = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union( pred_label , label , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> List[Any]:
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Union[str, Any]:
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> List[str]:
    """simple docstring"""
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , ):
"""simple docstring"""
lowerCAmelCase__ = mean_iou(
results=_UpperCamelCase , gt_seg_maps=_UpperCamelCase , num_labels=_UpperCamelCase , ignore_index=_UpperCamelCase , nan_to_num=_UpperCamelCase , label_map=_UpperCamelCase , reduce_labels=_UpperCamelCase , )
return iou_result
| 365
| 0
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 101
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case__ : str = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos , (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""pixel_values"""]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None:
super().__init__(**_UpperCAmelCase )
UpperCamelCase_ = size if size is not None else {'shortest_edge': 224}
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = resample
UpperCamelCase_ = do_rescale
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" in size:
UpperCamelCase_ = get_resize_output_image_size(_UpperCAmelCase , size['shortest_edge'] , default_to_square=_UpperCAmelCase )
elif "height" in size and "width" in size:
UpperCamelCase_ = (size['height'], size['width'])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
UpperCamelCase_ = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> int:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_ = to_numpy_array(_UpperCAmelCase )
if do_resize:
UpperCamelCase_ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase )
if do_center_crop:
UpperCamelCase_ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase )
if do_rescale:
UpperCamelCase_ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase )
if do_normalize:
UpperCamelCase_ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase )
UpperCamelCase_ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase )
return image
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> PIL.Image.Image:
UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_ = resample if resample is not None else self.resample
UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_ = image_std if image_std is not None else self.image_std
UpperCamelCase_ = size if size is not None else self.size
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCamelCase_ = make_batched(_UpperCAmelCase )
UpperCamelCase_ = [
[
self._preprocess_image(
image=_UpperCAmelCase , do_resize=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , do_center_crop=_UpperCAmelCase , crop_size=_UpperCAmelCase , do_rescale=_UpperCAmelCase , rescale_factor=_UpperCAmelCase , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , data_format=_UpperCAmelCase , )
for img in video
]
for video in videos
]
UpperCamelCase_ = {'pixel_values': videos}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 23
| 0
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name : str , save_dir : str , **config_kwargs : Optional[Any] ) -> Union[str, Any]:
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
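# Example invocation via python-fire (hypothetical script name and paths):
#   python save_randomly_initialized_model.py t5-small ./t5-random
# builds a randomly initialized model from the t5-small config and saves it,
# together with the matching tokenizer, under ./t5-random.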
| 61
|
'''simple docstring'''
import math
class Graph :
    def __init__(self , n=0) -> Any: # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge(self , u , v , w) -> Tuple:
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall(self) -> Tuple:
        """simple docstring"""
        for k in range(0 , self.n):
            for i in range(0 , self.n):
                for j in range(0 , self.n):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
    def show_min(self , u , v) -> Optional[int]:
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
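    # Added note: show_min returns the distance rather than printing it;
    # wrapping the calls in print() yields 11 for (1, 4) via 1 -> 3 -> 4
    # (5 + 6) and 16 for (0, 3) via 0 -> 2 -> 3 (9 + 7).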
| 61
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
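# Worked example (added): by the mass action law n * p = n_i**2. With
# electron_conc=25, hole_conc=100 and intrinsic_conc=0 the function above
# returns ('intrinsic_conc', 50.0), since sqrt(25 * 100) = 50.0.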
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78
|
'''simple docstring'''
from typing import Any
class Node :
    def __init__( self ,data ) -> None:
        self.data = data
        self.next = None
class LinkedList :
    def __init__( self ) -> None:
        self.head = None
    def print_list( self ) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data ,end=" " )
            temp = temp.next
        print()
    def push( self ,new_data ) -> None:
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self ,node_data_1 ,node_data_2 ) -> None:
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data , node_2.data = node_2.data , node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
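# Expected output (added note): the pushes prepend 5, 4, 3, 2, 1 in turn, so
# the first print_list() shows "1 2 3 4 5"; after swap_nodes(1, 4) the second
# shows "4 2 3 1 5".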
| 185
| 0
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query( query : str = "dhaka" , max_images : int = 5 ) -> int:
    """simple docstring"""
    max_images = min(max_images , 50 )  # Prevent abuse!
    params = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }
    html = requests.get('''https://www.google.com/search''' , params=params , headers=headers )
    soup = BeautifulSoup(html.text , '''html.parser''' )
    matched_images_data = ''''''.join(
        re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )
    matched_google_image_data = re.findall(
        R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , matched_images_data_json , )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(matched_google_image_data ) , )
    matched_google_full_resolution_images = re.findall(
        R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , removed_matched_google_images_thumbnails , )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image , '''ascii''' ).decode(
            '''unicode-escape''' )
        original_size_img = bytes(original_size_img_not_fixed , '''ascii''' ).decode(
            '''unicode-escape''' )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(opener )
        path_name = f"query_{query.replace(' ' , '_' )}"
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , f"{path_name}/original_size_img_{index}.jpg" )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 702
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt'}
a__ : List[str] = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
a__ : Any = {
'facebook/esm2_t6_8M_UR50D': 1_024,
'facebook/esm2_t12_35M_UR50D': 1_024,
}
def load_vocab_file( vocab_file : Optional[Any] ) -> List[str]:
    """simple docstring"""
    with open(vocab_file , '''r''' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =VOCAB_FILES_NAMES
_lowerCamelCase =PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase =["input_ids", "attention_mask"]
    def __init__( self : Dict , vocab_file : Optional[int] , unk_token : Optional[Any]="<unk>" , cls_token : Any="<cls>" , pad_token : Dict="<pad>" , mask_token : int="<mask>" , eos_token : List[Any]="<eos>" , **kwargs : List[Any] , ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
def __snake_case ( self : int , a__ : int ):
return self._id_to_token.get(a__ , self.unk_token )
def __snake_case ( self : int , a__ : str ):
return self._token_to_id.get(a__ , self._token_to_id.get(self.unk_token ) )
def __snake_case ( self : Any , a__ : str , **a__ : List[str] ):
return text.split()
def __snake_case ( self : str , a__ : Tuple=False ):
return len(self._id_to_token )
def __snake_case ( self : str ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __snake_case ( self : int , a__ : str ):
return self._token_to_id.get(a__ , self._token_to_id.get(self.unk_token ) )
def __snake_case ( self : Dict , a__ : int ):
return self._id_to_token.get(a__ , self.unk_token )
def __snake_case ( self : List[Any] , a__ : List[int] , a__ : Optional[List[int]] = None ):
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __snake_case ( self : Optional[int] , a__ : List , a__ : Optional[List] = None , a__ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase = [1] + ([0] * len(a__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(a__ ) + [1]
return mask
def __snake_case ( self : Tuple , a__ : List[str] , a__ : Dict ):
UpperCAmelCase = os.path.join(a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(a__ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __snake_case ( self : Union[str, Any] ):
return self.get_vocab_size(with_added_tokens=a__ )
def __snake_case ( self : int , a__ : Union[List[str], List[AddedToken]] , a__ : bool = False ):
return super()._add_tokens(a__ , special_tokens=a__ )
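# Usage sketch (hedged): assumes a local ESM-style "vocab.txt" with one token per
# line, e.g. the file shipped with facebook/esm2_t6_8M_UR50D. Because every residue
# is registered as a no-split token, a raw sequence splits into single amino acids.
#
#     tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#     encoded = tokenizer("MKTAYIAK")
#     print(encoded["input_ids"])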
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def circle_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order, in place, using circle sort."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Swap out-of-order end pairs moving inwards; return True if anything swapped."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
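# Worked example (a quick sanity check; any comparable list behaves the same):
#
#     >>> circle_sort([5, 2, 9, 1])
#     [1, 2, 5, 9]
#     >>> circle_sort([])
#     []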
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")

    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
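# Example invocation (a sketch only; the script name, model identifiers and output
# path below are illustrative assumptions, not values taken from this file):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_token \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-token-checkpoint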
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    """Constructs a BARThez tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
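# Usage sketch (hedged; the checkpoint id comes from the pretrained map above, the
# sentence is an arbitrary example):
#
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Le camembert est délicieux.")["input_ids"]
#     print(tokenizer.decode(ids))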
"""simple docstring"""
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k: the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
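# Worked example: C(5, 2) = 5! / (2! * 3!) = 10 two-element subsets of five items.
#
#     >>> combinations(5, 2)
#     10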
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Return the largest-magnitude eigenvalue of ``input_matrix`` and its eigenvector."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
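# Minimal worked example (a sketch; the matrix is an arbitrary symmetric choice):
# for A = [[2, 0], [0, 1]] and starting vector [1, 1], the iteration converges to
# the dominant eigenvalue 2 with eigenvector close to [1, 0].
#
#     >>> value, vec = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
#     >>> round(float(value), 6)
#     2.0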
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    """Constructs a BigBird tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
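# Usage sketch (hedged; the checkpoint id is taken from the pretrained map above,
# the input text is arbitrary):
#
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     print(tokenizer.tokenize("BigBird handles long documents."))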
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    """A progress bar for display in a notebook."""

    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    """Tracks an ongoing training with a progress bar and a table reporting metrics."""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    """A `TrainerCallback` that displays training or evaluation progress in a Jupyter notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
        )
        self.training_tracker = None
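# Usage sketch (hedged): inside a Jupyter notebook, transformers' Trainer normally
# installs this callback automatically when IPython is detected; attaching it
# explicitly looks like this (``model`` and ``training_args`` are placeholders):
#
#     from transformers import Trainer
#
#     trainer = Trainer(model=model, args=training_args, callbacks=[NotebookProgressCallback()])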
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline (SAM-style models), chunked over point grids."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq SEW checkpoint into the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
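# Example invocation (a sketch; all paths are hypothetical placeholders, the flags
# come from the parser above):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_checkpoint.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --is_finetuned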
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image in place, thresholding at the mean pixel value."""
    width, height = image.size
    mean = 0
    pixels = image.load()

    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
    # stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
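# --- Illustration (not part of the original script) -----------------------------
# rename_key mutates the state dict in place; the demo helper below is
# hypothetical and only verifies that behaviour.
def _demo_rename_key() -> None:
    sd = {"backbone.patch_embed.proj.weight": 1}
    rename_key(sd, "backbone.patch_embed.proj.weight", "embeddings.projection.weight")
    assert sd == {"embeddings.projection.weight": 1}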
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
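# --- Illustration (not part of the original script) -----------------------------
# Hypothetical demo of the fused-qkv split used above: a (3 * dim, dim) matrix
# separates into equal thirds for query, key and value.
def _demo_qkv_split() -> None:
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)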
def read_in_decoder_q_k_v(state_dict, config):
    '''simple docstring'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> Image.Image:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    '''simple docstring'''
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 78
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
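# --- Illustration (not part of the original file) -------------------------------
# A minimal sketch of running the model; the tiny config values are hypothetical
# and chosen only so the example executes quickly.
def _demo_roberta_series():
    config = RobertaSeriesConfig(
        vocab_size=100, hidden_size=32, num_hidden_layers=1, num_attention_heads=2,
        intermediate_size=64, project_dim=16)
    model = RobertaSeriesModelWithTransformation(config)
    input_ids = torch.tensor([[0, 5, 6, 2]])
    output = model(input_ids=input_ids)
    assert output.projection_state.shape == (1, 4, 16)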
| 619
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 720
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """simple docstring"""
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: both variables are zero (consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
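# --- Illustration (not part of the original file) -------------------------------
# Worked example: 2x + 3y = 0 and 5x + y = 0 yields the trivial solution (0, 0),
# while x + 2y = 10 and 4x + 5y = 10 solves to (-10.0, 10.0):
#     determinant   = 1*5 - 4*2  = -3
#     determinant_x = 10*5 - 10*2 = 30  -> x = 30 / -3  = -10.0
#     determinant_y = 1*10 - 4*10 = -30 -> y = -30 / -3 = 10.0
if __name__ == "__main__":
    assert cramers_rule_2x2([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)
    assert cramers_rule_2x2([1, 2, 10], [4, 5, 10]) == (-10.0, 10.0)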
| 403
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''camembert-base''': 5_12,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
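# --- Illustration (not part of the original file) -------------------------------
# CamemBERT sequence pairs follow the RoBERTa format with a doubled separator:
#     single sequence:  <s> A </s>
#     pair:             <s> A </s></s> B </s>
# With hypothetical ids cls=5 and sep=6, build_inputs_with_special_tokens([10, 11], [20])
# would return [5, 10, 11, 6, 6, 20, 6], and the token type ids are all zeros.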
| 313
|
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
'a': 0.0_8_4_9_7,
'b': 0.0_1_4_9_2,
'c': 0.0_2_2_0_2,
'd': 0.0_4_2_5_3,
'e': 0.1_1_1_6_2,
'f': 0.0_2_2_2_8,
'g': 0.0_2_0_1_5,
'h': 0.0_6_0_9_4,
'i': 0.0_7_5_4_6,
'j': 0.0_0_1_5_3,
'k': 0.0_1_2_9_2,
'l': 0.0_4_0_2_5,
'm': 0.0_2_4_0_6,
'n': 0.0_6_7_4_9,
'o': 0.0_7_5_0_7,
'p': 0.0_1_9_2_9,
'q': 0.0_0_0_9_5,
'r': 0.0_7_5_8_7,
's': 0.0_6_3_2_7,
't': 0.0_9_3_5_6,
'u': 0.0_2_7_5_8,
'v': 0.0_0_9_7_8,
'w': 0.0_2_5_6_0,
'x': 0.0_0_1_5_0,
'y': 0.0_1_9_9_4,
'z': 0.0_0_0_7_7,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter.lower()] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key, )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
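# --- Illustration (not part of the original file) -------------------------------
# Usage sketch: shifting "hello, world!" by 7 gives "olssv, dvysk!"; the
# chi-squared fit against English letter frequencies should then recover the
# plaintext (for very short inputs the fit can occasionally prefer another shift).
if __name__ == "__main__":
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("olssv, dvysk!")
    print(f"Most likely shift: {shift} (chi^2={chi_squared:.2f}): {decoded}")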
| 538
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
"""simple docstring"""
    def test_input_types(self):
        """simple docstring"""
        nested_token_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(nested_token_ids)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """simple docstring"""
        # One branch must not be a complete prefix of another, otherwise fulfilment
        # would be ambiguous.
        nested_token_ids = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(nested_token_ids)  # fails here

    def test_example_progression(self):
        """simple docstring"""
        nested_token_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(nested_token_ids)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """simple docstring"""
        nested_token_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(nested_token_ids)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
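# --- Illustration (not part of the original test file) --------------------------
# A minimal sketch of driving a DisjunctiveConstraint by hand, mirroring what
# constrained beam search does internally: feed candidate token ids and watch the
# (stepped, completed, reset) flags. The helper name is hypothetical.
def _demo_disjunctive_constraint() -> None:
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token_id in (1, 2, 4):
        stepped, completed, reset = dc.update(token_id)
    assert completed  # [1, 2, 4] satisfies the second branch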
| 705
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
"""simple docstring"""
    def analyze_directory(self, directory: Path, identifier=None, n_identifier=None, ignore_files=None, only_modules=True, ):
        """simple docstring"""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        """simple docstring"""
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        """simple docstring"""
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        """simple docstring"""
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        """simple docstring"""
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        """simple docstring"""
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 545
| 0
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    '''simple docstring'''
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 646
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
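# --- Illustration (not part of the original file) -------------------------------
# A minimal BFS-based two-coloring sketch of the same check, included for
# comparison with the recursive DFS above; it avoids recursion-depth limits on
# long paths.
from collections import deque


def check_bipartite_bfs(graph: dict) -> bool:
    color = [-1] * len(graph)
    for start in range(len(graph)):
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True


print(check_bipartite_bfs(graph))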
| 296
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
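# --- Illustration (not part of the original file) -------------------------------
# A minimal usage sketch, assuming the checkpoint name below; the processor routes
# images to the image processor and text to the tokenizer, renaming the text
# fields to decoder_* as shown above.
#
#   from transformers import Pix2StructProcessor
#   from PIL import Image
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=Image.open("page.png"), text="caption", return_tensors="pt")
#   # inputs now holds the image patches plus decoder_input_ids / decoder_attention_mask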
| 703
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
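# --- Illustration (not part of the original file) -------------------------------
# Hypothetical demo of trim_batch: columns made up entirely of padding
# (pad_token_id = 0 here) are dropped from the batch.
def _demo_trim_batch() -> None:
    batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
    trimmed = trim_batch(batch, pad_token_id=0)
    assert trimmed.tolist() == [[5, 6], [7, 0]]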
class Seq2SeqDataset(Dataset):
    '''simple docstring'''
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f'found empty line in {self.src_file}'
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        '''simple docstring'''
        return len(self.src_lens)

    def __getitem__(self, index):
        '''simple docstring'''
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        '''simple docstring'''
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        '''simple docstring'''
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str):
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable):
    return list(map(f, x))
def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
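def _demo_f1_score() -> None:
    # Hypothetical worked example (not in the original file): after stripping the
    # article "the", the prediction shares 2 of its 3 tokens with the 2-token
    # reference, so precision = 2/3, recall = 1 and F1 = 0.8.
    assert abs(f1_score("the cat sat here", "cat sat") - 0.8) < 1e-9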
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
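# Hedged usage sketch (illustrative addition, not in the original file): the
# SQuAD-style metrics above can be exercised without any model.
if __name__ == "__main__":
    assert normalize_answer("The  Cat!") == "cat"
    assert exact_match_score("An answer.", "answer")
    print(f1_score("new york city", "york city"))  # 0.8 -> 2 shared tokens, p=2/3, r=1
    print(calculate_exact_match(["a cat"], ["cat"]))  # {'em': 1.0}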
| 27
| 0
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=16 , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=14 , UpperCamelCase__=10 , UpperCamelCase__=19 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=True , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=[1, 2, 3, 4, 5] , UpperCamelCase__=25 , UpperCamelCase__=5 , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = d_model
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = prediction_length
lowerCamelCase_ = context_length
lowerCamelCase_ = cardinality
lowerCamelCase_ = num_time_features
lowerCamelCase_ = lags_sequence
lowerCamelCase_ = embedding_dimension
lowerCamelCase_ = is_training
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = context_length
lowerCamelCase_ = prediction_length + label_length
lowerCamelCase_ = label_length
lowerCamelCase_ = moving_average
lowerCamelCase_ = autocorrelation_factor
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
lowerCamelCase_ = config.context_length + max(config.lags_sequence )
lowerCamelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCamelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCamelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
lowerCamelCase_ = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
lowerCamelCase_ = model(**UpperCamelCase__ )
lowerCamelCase_ = outputs.encoder_last_hidden_state
lowerCamelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = model.create_network_inputs(**UpperCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
lowerCamelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
lowerCamelCase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__lowercase :int = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowercase :Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__lowercase :Any = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__lowercase :Optional[int] = False
__lowercase :Dict = False
__lowercase :List[str] = False
__lowercase :List[Any] = False
__lowercase :Tuple = False
__lowercase :Dict = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info['''missing_keys'''] , [] )
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = inspect.signature(getattr(UpperCamelCase__ , '''forward''' ) )
# The main input is the name of the argument after `self`
lowerCamelCase_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase__ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
lowerCamelCase_ = getattr(self.model_tester , '''seq_length''' , UpperCamelCase__ )
lowerCamelCase_ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCamelCase__ )
lowerCamelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCamelCase__ )
lowerCamelCase_ = getattr(self.model_tester , '''d_model''' , UpperCamelCase__ )
lowerCamelCase_ = getattr(self.model_tester , '''num_attention_heads''' , UpperCamelCase__ )
lowerCamelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCamelCase_ = len(UpperCamelCase__ )
lowerCamelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
lowerCamelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 142
|
"""simple docstring"""
import os
def solution():
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_directory, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
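# Hedged illustration (added, not in the original file): the same bottom-up
# recurrence applied to the small Project Euler 18 example triangle, so the
# logic can be checked without a triangle.txt file.
def max_path_sum(triangle):
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23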
| 142
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 179
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase :int = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=58_101, decoder_vocab_size=None, max_position_embeddings=1_024, encoder_layers=12,
        encoder_ffn_dim=4_096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4_096,
        decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=1_024, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58_100, scale_embedding=False,
        pad_token_id=58_100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[F"""present.{i}.key"""] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[F"""present.{i}.value"""] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
def _UpperCamelCase ( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : List[Any] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : str = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : Optional[int] = dict(**lowercase__ , **lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = common_inputs['input_ids'].shape
SCREAMING_SNAKE_CASE : Tuple = common_inputs['decoder_input_ids'].shape[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.num_attention_heads
SCREAMING_SNAKE_CASE : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : str = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[str] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowercase__ , lowercase__ )] , dim=1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE : Optional[Any] = min(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = max(lowercase__ , lowercase__ ) - min_num_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : str = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowercase__ , lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def _UpperCamelCase ( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : Dict = seqlen + 2
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.num_layers
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs['attention_mask'].dtype
SCREAMING_SNAKE_CASE : List[str] = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__ )] , dim=1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def _UpperCamelCase ( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowercase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[str] = tokenizer.num_special_tokens_to_add(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowercase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Optional[int] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Tuple = dict(tokenizer(lowercase__ , return_tensors=lowercase__ ) )
return common_inputs
def _UpperCamelCase ( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
return common_inputs
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : List[Any] = super()._flatten_past_key_values_(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE : Any = super(lowercase__ , self )._flatten_past_key_values_(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
@property
def _UpperCamelCase ( self ) -> float:
return 1E-4
| 179
| 1
|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , )
__magic_name__ :Optional[int] = field
__magic_name__ :List[Any] = path_or_paths if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else {self.split: path_or_paths}
__magic_name__ :Optional[int] = Json(
cache_dir=__lowerCAmelCase , data_files=__lowerCAmelCase , features=__lowerCAmelCase , field=__lowerCAmelCase , **__lowerCAmelCase , )
def A ( self ):
"""simple docstring"""
# Build iterable dataset
if self.streaming:
__magic_name__ :Dict = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__magic_name__ :int = None
__magic_name__ :Optional[Any] = None
__magic_name__ :int = None
__magic_name__ :str = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , )
__magic_name__ :Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class JsonDatasetWriter:
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
__magic_name__ :Tuple = dataset
__magic_name__ :Tuple = path_or_buf
__magic_name__ :Tuple = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__magic_name__ :Dict = num_proc
__magic_name__ :Dict = '''utf-8'''
__magic_name__ :List[Any] = to_json_kwargs
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.to_json_kwargs.pop('''path_or_buf''' , __lowerCAmelCase )
__magic_name__ :Any = self.to_json_kwargs.pop('''orient''' , '''records''' )
__magic_name__ :Dict = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
__magic_name__ :Union[str, Any] = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
__magic_name__ :Union[str, Any] = self.to_json_kwargs.pop('''compression''' , __lowerCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=__lowerCAmelCase ) as buffer:
__magic_name__ :Optional[Any] = self._write(file_obj=__lowerCAmelCase , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
''' was passed. Please provide a local path instead.''' )
__magic_name__ :int = self._write(
file_obj=self.path_or_buf , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs )
return written
    def _batch_json(self, args):
        """simple docstring"""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Tuple = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
__magic_name__ :Union[str, Any] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__lowerCAmelCase )
else:
__magic_name__ , __magic_name__ :List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __lowerCAmelCase , __lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(__lowerCAmelCase )
return written
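# Hedged usage sketch (illustrative addition): `Dataset.to_json` dispatches to the
# writer above; a minimal round trip, assuming a local output path, might look like:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ds.to_json("out.jsonl", lines=True, orient="records")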
| 0
|
'''simple docstring'''
from torch import nn
def get_activation(act_fn):
    '''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
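# Hedged usage sketch (illustrative addition): the returned module can be applied
# to a tensor directly.
if __name__ == "__main__":
    import torch

    act = get_activation("silu")
    print(act(torch.tensor([-1.0, 0.0, 1.0])))  # SiLU: x * sigmoid(x)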
| 672
| 0
|
import math
def res(x, y):
    """simple docstring"""
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
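    # Hedged self-check (illustrative addition): compare 2^10 and 10^2 without
    # reading stdin; res(2, 10) = 10*log10(2) ~ 3.01 exceeds res(10, 2) = 2.
    assert res(2, 10) > res(10, 2)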
| 351
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase = None
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
UpperCAmelCase = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 351
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
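# Hedged self-check (illustrative addition): floats_list returns a shape[0] x shape[1]
# nested Python list with values in [0, scale).
#
#   _demo = floats_list((2, 3), scale=2.0)
#   assert len(_demo) == 2 and len(_demo[0]) == 3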
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=1, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1_024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
| 11
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
| 366
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
__magic_name__ : List[str] = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self, vocab_size=3_0145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1,
        gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True,
        max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1E-1_2, init_std=0.0_2, bos_index=0,
        eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first",
        summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1,
        start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs['n_words']
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
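# Usage sketch (added for illustration; it assumes the standard `transformers`
# attribute aliasing provided by PretrainedConfig, imported above):
if __name__ == "__main__":
    config = XLMConfig(emb_dim=1024, n_layers=6)
    print(config.hidden_size)  # 1024, served through the attribute_map alias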
| 410
|
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]


def create_vector(end_point_a: Pointad, end_point_b: Pointad) -> Vectorad:
    """simple docstring"""
    x = end_point_b[0] - end_point_a[0]
    y = end_point_b[1] - end_point_a[1]
    z = end_point_b[2] - end_point_a[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """simple docstring"""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Pointad, b: Pointad, c: Pointad, accuracy: int = 10) -> bool:
    """simple docstring"""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
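# Worked example (added for illustration): three points on the line y = x in
# the z = 0 plane are collinear, so the AB x AC cross product rounds to zero.
if __name__ == "__main__":
    print(are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (2.0, 2.0, 0.0)))  # True
    print(are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)))  # False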
| 410
| 1
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))


@get_duration
def map(dataset, **kwargs):
    """simple docstring"""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    """simple docstring"""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """simple docstring"""
    # The result keys below are descriptive labels restored for readability.
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
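# Note (added): `get_duration` comes from a local `utils` module that is not
# shown here. A plausible sketch of such a decorator -- an assumption, not the
# actual implementation -- would simply time the wrapped call:
#
#     import functools, time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper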
| 623
|
def fibonacci(n):
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1000):
    """simple docstring"""
    return fibonacci_digits_index(n)
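# Worked example (added): fibonacci(12) == 144 is the first Fibonacci number
# with three digits, so fibonacci_digits_index(3) should return 12.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12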
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 623
| 1
|
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    '''simple docstring'''
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp):
    '''simple docstring'''
    r = OrderedDict()
    with open(ckp, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    '''simple docstring'''
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    '''simple docstring'''
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    '''simple docstring'''
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    '''simple docstring'''
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name, )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    '''simple docstring'''
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    '''simple docstring'''
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    '''simple docstring'''
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    '''simple docstring'''
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    '''simple docstring'''
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    '''simple docstring'''
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0, len(images), batch))
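# Usage sketch (added; network access and the file names are illustrative only):
#
#     resolved = cached_path("https://cdn.huggingface.co/some-model/config.yaml")  # hypothetical URL
#     cfg = Config.load_yaml(resolved)
#     img = img_tensorize("demo.jpg")  # hypothetical local image, returned as an RGB ndarray
#     for batch in chunk([img], batch=1):
#         ...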
| 708
|
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    '''simple docstring'''
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f'Number {x} is at index {res}')
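# Worked example (added): in the sorted list below, 55 sits at index 10; the
# search jumps in floor(sqrt(n))-sized blocks, then scans one block linearly.
#
#     jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144], 55)  # -> 10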
| 267
| 0
|
"""simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
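# The non-deprecated import path, per the warning above:
#
#     from accelerate import find_executable_batch_size
#
# A typical (illustrative) use decorates a training function so it retries
# with a smaller batch size after an out-of-memory error:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...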
| 82
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    '''simple docstring'''
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features", [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ], )
def test_dataset_from_text_features(features, text_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    '''simple docstring'''
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    '''simple docstring'''
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features", [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ], )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    '''simple docstring'''
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
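# For reference, the high-level equivalent of what these tests exercise, as a
# sketch using the public API (the file path is hypothetical):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("text", data_files={"train": "my_file.txt"})
#     assert ds["train"].column_names == ["text"]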
| 672
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """simple docstring"""
    if "." in tensor_name:
        splits = tensor_name.split('.')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"""{module} has no attribute {split}.""")
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device('meta') and device not in ["meta", torch.device('meta')] and value is None:
        raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""")
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, 'Params4bit') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('cpu')
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes')) > version.parse(
                        '0.37.2')
                    if not is_8bit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.')
            else:
                new_value = torch.tensor(value, device='cpu')
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, 'SCB', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """simple docstring"""
    modules_to_not_convert = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.')
    return model
def replace_8bit_linear(*args, **kwargs):
    """simple docstring"""
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead', FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    """simple docstring"""
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead', FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    """simple docstring"""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
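# Usage sketch (added; assumes a CUDA machine with `bitsandbytes` installed,
# and the model name below is only a placeholder): end users normally reach
# this module through `from_pretrained` with a quantization config.
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2", quantization_config=quantization_config)
#     # Internally this walks the module tree via replace_with_bnb_linear,
#     # swapping nn.Linear / Conv1D layers for bnb.nn.Linear8bitLt.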
| 704
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """simple docstring"""

    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]], ):
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        '''simple docstring'''
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        '''simple docstring'''
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self):
        '''simple docstring'''
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        '''simple docstring'''
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self):
        '''simple docstring'''
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
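# Illustrative run (added): the module-level tables above describe five
# processes competing for four resource types. Any truthy keyword (for
# example describe=True) makes main() print the tables first.
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)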
| 319
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A : List[str] = logging.get_logger(__name__)
__A : Optional[int] = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
__A : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
__A : List[str] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self):
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ])
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs

    @property
    def atol_for_validation(self):
        return 1e-3
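# Usage sketch (added for illustration): build a config with defaults and read
# an aliased attribute; `hidden_size` resolves to `d_model` via attribute_map.
if __name__ == "__main__":
    config = WhisperConfig()
    print(config.hidden_size)  # 256 by default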
| 343
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    # idx_of_element gives an O(1) node lookup, so the update is O(log n)
    # overall instead of O(n) for a naive search.
    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node('''R''', -1)
b = Node('''B''', 6)
a = Node('''A''', 3)
x = Node('''X''', 1)
e = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
    print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343
| 1
|
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""") as file:
        for line in file:
            grid.append(line.strip("""\n""").split(""" """))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 164
|
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(r'^\s*try:')
# Catches a line with else:
_re_else = re.compile(r'^\s*else:')
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
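# Shape of the two returned mappings, sketched on a hypothetical init:
#   import_dict_objects = {"none": ["FooConfig"], "torch": ["FooModel"]}
#   type_hint_objects   = {"none": ["FooConfig"], "torch": ["FooModel"]}
# Both halves of the init are expected to define exactly the same objects per backend,
# which is what analyze_results below verifies.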
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
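# The duplicate detection above relies on collections.Counter; a quick illustration:
#   collections.Counter(["A", "B", "A"]).items() -> dict_items([("A", 2), ("B", 1)])
# so find_duplicates(["A", "B", "A"]) returns ["A"].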
def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
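# Path-to-module conversion, illustrated: the file src/transformers/trainer.py has the
# relative path "trainer.py" and becomes the submodule "trainer"; the directory
# src/transformers/models/bert becomes "models.bert". Files nested deeper than one
# level are skipped by the split(".") length check, while folders are always kept.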
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    """
    Check all submodules of Transformers are properly registered in the main init. Raises an error otherwise.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 164
| 1
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        from_model_out = from_model(x)
        our_model_out = our_model(x).logits

    assert torch.allclose(from_model_out, our_model_out), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
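# How the partial above behaves, sketched with the names defined in this function:
# ImageNetPreTrainedConfig(hidden_sizes=[128, 256, 384], ...) expands to
# LevitConfig(num_labels=1000, id2label=id2label, label2id=label2id, hidden_sizes=[128, 256, 384], ...),
# so every per-checkpoint config automatically carries the ImageNet label mapping.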
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
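# Example invocation (the script file name and the paths are illustrative):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub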
| 74
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """
    RegNet embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
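# Shape walk-through of the squeeze-and-excitation block above (sizes are illustrative):
# a (batch, 64, 56, 56) input is pooled to (batch, 64, 1, 1), squeezed through the two
# 1x1 convolutions into per-channel weights in [0, 1] via the sigmoid, and then
# broadcast-multiplied back onto the (batch, 64, 56, 56) map to reweight each channel.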
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
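# Note on the stage layout above: only the first block of a stage downsamples (stride=2
# by default), and the remaining depth - 1 blocks keep the resolution, so a stage with
# depth=4 is [downsampling block, block, block, block].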
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[Tensor] = None,
        labels: Optional[Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
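# Problem-type dispatch in the loss above, summarized: with num_labels == 1 and no
# explicit problem_type the head falls back to regression (MSELoss); integer labels
# with num_labels > 1 select single-label classification (CrossEntropyLoss); anything
# else selects multi-label classification (BCEWithLogitsLoss).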
| 623
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
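# Minimal usage sketch (a hand-written example, not part of the original module):
#
# from transformers import MobileNetV2Config
#
# config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
# assert config.depth_multiplier == 1.4  # a multiplier <= 0 would raise ValueError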
| 596
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """
    Apply a function to iterable elements in parallel, using either multiprocessing.Pool or joblib for
    parallelization.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
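# Worked example of the contiguous split arithmetic above: 10 items over num_proc=3
# gives div=3 and mod=1, so the slices are iterable[0:4], iterable[4:7] and
# iterable[7:10]; the first `mod` processes each receive one extra item and the slices
# cover the iterable exactly.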
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm cannot accurately be applied to joblib
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """
    Configure the parallel backend, which relies on the parallelization implemented by joblib.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
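# Usage sketch (a hypothetical call; argument order follows parallel_map above, and the
# "spark" backend additionally requires the optional joblibspark package):
#
# with parallel_backend("spark"):
#     mapped = parallel_map(fn, items, num_proc, types, disable_tqdm, desc, single_map_nested_func)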
| 596
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Any =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__magic_name__ : Optional[int] =PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
torch.manual_seed(0 )
__magic_name__ : Optional[Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__ : List[str] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__magic_name__ : Union[str, Any] =CLIPTextModel(UpperCamelCase__ )
__magic_name__ : Optional[int] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__ : List[Any] ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A__ ( self :List[str] , __snake_case :List[Any] , __snake_case :Any=0 ):
'''simple docstring'''
__magic_name__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : List[str] =Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
__magic_name__ : Optional[int] =torch.manual_seed(UpperCamelCase__ )
else:
__magic_name__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
__magic_name__ : int ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[int] ="cpu" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Optional[int] =self.get_dummy_components()
__magic_name__ : Dict =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
__magic_name__ : Optional[int] =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__magic_name__ : Any =self.get_dummy_inputs(UpperCamelCase__ )
__magic_name__ : Any =sd_pipe(**UpperCamelCase__ ).images
__magic_name__ : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : Optional[Any] =np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Optional[Any] ="cpu" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Union[str, Any] =self.get_dummy_components()
__magic_name__ : List[Any] =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
__magic_name__ : Dict =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__magic_name__ : Optional[Any] =self.get_dummy_inputs(UpperCamelCase__ )
__magic_name__ : Optional[int] ="french fries"
__magic_name__ : Any =sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
__magic_name__ : str =output.images
__magic_name__ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : List[str] =np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : List[str] ="cpu" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Dict =self.get_dummy_components()
__magic_name__ : Union[str, Any] =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
__magic_name__ : List[Any] =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__magic_name__ : Optional[Any] =self.get_dummy_inputs(UpperCamelCase__ )
__magic_name__ : Optional[Any] =[inputs["prompt"]] * 2
__magic_name__ : Optional[int] =np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__magic_name__ : List[Any] =torch.from_numpy(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
__magic_name__ : Any =image / 2 + 0.5
__magic_name__ : str =image.permute(0 , 3 , 1 , 2 )
__magic_name__ : List[Any] =image.repeat(2 , 1 , 1 , 1 )
__magic_name__ : Optional[int] =sd_pipe(**UpperCamelCase__ ).images
__magic_name__ : Any =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__magic_name__ : Tuple =np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Optional[int] ="cpu" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Dict =self.get_dummy_components()
__magic_name__ : int =EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__magic_name__ : Any =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
__magic_name__ : Dict =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__magic_name__ : List[str] =self.get_dummy_inputs(UpperCamelCase__ )
__magic_name__ : List[Any] =sd_pipe(**UpperCamelCase__ ).images
__magic_name__ : List[Any] =image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join(str(x) for x in rounded_slice))
assert image.shape == (1, 32, 32, 3)
__magic_name__ : int =np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A__ ( self :int ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Dict =self.get_dummy_components()
__magic_name__ : List[str] =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
__magic_name__ : Dict =VaeImageProcessor(do_resize=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
__magic_name__ : int =pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__magic_name__ : str =pipe(**self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="""pt""" ) )[0]
__magic_name__ : Dict =components["vae"]
__magic_name__ : Union[str, Any] =self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__magic_name__ : List[str] =vae.encode(inputs[image_param] ).latent_dist.mode()
__magic_name__ : str =pipe(**UpperCamelCase__ )[0]
__magic_name__ : Optional[int] =np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase__ , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Any , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Tuple =torch.manual_seed(UpperCamelCase__ )
__magic_name__ : Union[str, Any] =load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__magic_name__ : str ={
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : Union[str, Any] =self.get_inputs()
__magic_name__ : Any =pipe(**UpperCamelCase__ ).images
__magic_name__ : Tuple =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ : Dict =np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : List[str] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ )
__magic_name__ : Optional[int] =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : int =self.get_inputs()
__magic_name__ : Optional[Any] =pipe(**UpperCamelCase__ ).images
__magic_name__ : Dict =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ : Optional[Any] =np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : List[str] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ )
__magic_name__ : str =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : Any =self.get_inputs()
__magic_name__ : Optional[int] =pipe(**UpperCamelCase__ ).images
__magic_name__ : List[Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ : Optional[int] =np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A__ ( self :str ):
'''simple docstring'''
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
__magic_name__ : Any =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
__magic_name__ : List[Any] =pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : int =self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A__ ( self :Dict ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ : Dict =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
__magic_name__ : List[str] =pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ : str =self.get_inputs()
__magic_name__ : int =pipe(**UpperCamelCase__ )
__magic_name__ : List[Any] =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : List[Any] =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__magic_name__ : Optional[Any] =inputs["image"].resize((5_04, 5_04) )
__magic_name__ : Union[str, Any] ="timbrooks/instruct-pix2pix"
__magic_name__ : List[Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : Union[str, Any] =pipe(**UpperCamelCase__ )
__magic_name__ : Any =output.images[0]
__magic_name__ : str =image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
__magic_name__ : Any =np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 21
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
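# What the check above verifies, in short: running the decoder once over the full
# sequence and running it incrementally with past_key_values must produce the same
# logits for the new tokens (compared on a random slice with rtol=1e-3), which is the
# correctness condition for key/value caching during generation.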
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 311
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
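# Usage sketch (illustrative, assuming an installed `transformers` package): the
# _LazyModule above defers the heavy submodule imports until first attribute
# access, so importing the package stays cheap until a class is actually used.
#
#   from transformers import ConvBertConfig, ConvBertModel  # triggers lazy load
#   config = ConvBertConfig(num_hidden_layers=2)  # small config, no download needed
#   model = ConvBertModel(config)                 # randomly initialised weights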
| 462
|
lowerCamelCase ={"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCamelCase =["a", "b", "c", "d", "e"]
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : str = start
# add current to visited
visited.append(UpperCamelCase__ )
UpperCamelCase__ : int = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCamelCase__ : int = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
UpperCamelCase__ : Optional[int] = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase =topological_sort("a", [], [])
print(sort)
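# Worked example (illustrative): starting from "a", the DFS above finishes
# c first (no neighbours), then b's children d and e, then b, then a:
#
#   >>> topological_sort("a", [], [])
#   ['c', 'd', 'e', 'b', 'a']
#
# Reading the list right-to-left yields a valid topological order of the DAG.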
| 462
| 1
|
'''simple docstring'''
import socket
def main() -> None:
    """Receive a file from a server over a TCP socket and save it locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
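# Companion sketch (illustrative, not part of the original script): a minimal
# server the client above could talk to. The port and the "send the whole file,
# then close" protocol mirror the client; the filename is an assumption.
def serve_file(filename: str = "file_to_send.bin", port: int = 12_312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1_024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1_024):
            conn.send(chunk)
    conn.close()  # closing the connection signals end-of-file to the client
    server.close()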
| 150
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    """Build a single RAG checkpoint from separate generator and question-encoder checkpoints."""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
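# Example invocation (illustrative; the script filename and checkpoint names are
# placeholders, any compatible question-encoder/generator pair should work):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated
#
# The script writes the merged model plus both tokenizers under --dest.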
| 150
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=2000,
        snr=0.15,
        sigma_min=0.01,
        sigma_max=1348.0,
        sampling_eps=1e-5,
        correct_steps=1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
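# Usage sketch (illustrative): the predictor-corrector loop this scheduler is
# designed for. `score_model` stands in for any network predicting the score
# grad_x log p_t(x); it is an assumption, not part of this file.
def sample(score_model, shape=(1, 3, 256, 256), num_inference_steps=2000, device="cpu"):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    sample = torch.randn(*shape, device=device) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        # corrector steps (Langevin dynamics), then one predictor step
        for _ in range(scheduler.config.correct_steps):
            model_output = score_model(sample, t)
            sample = scheduler.step_correct(model_output, sample).prev_sample
        model_output = score_model(sample, t)
        output = scheduler.step_pred(model_output, t, sample)
        sample = output.prev_sample
    return output.prev_sample_mean  # the denoised mean is usually returned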
| 369
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'Salesforce/codegen-350M-mono': 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) CodeGen tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        return completion
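# Usage sketch (illustrative; the checkpoint name matches the map above):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tok("def hello():", return_tensors="pt").input_ids
#   # stop decoding at the first comment/docstring/blank block after the answer
#   text = tok.decode(ids[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])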
| 369
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for DeBERTa's GPT2-style BPE tokenizer."""

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 81
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` ends in 1, False if it ends in 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # numbers that only differ by trailing zeros share the same chain result
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` produce a chain ending in 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
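# Worked example (illustrative): starting at 44 the chain is
#   44 -> 4^2 + 4^2 = 32 -> 3^2 + 2^2 = 13 -> 1 + 9 = 10 -> 1
# so chain(44) is True, while
#   85 -> 64 + 25 = 89
# ends the other chain, so chain(85) is False and counts toward the solution.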
| 542
| 0
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    """Builds small Nystromformer configs and inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Nystromformer."""

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 682
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple vector over the reals with the usual operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A simple matrix with elementary operations and determinant support."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
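# Usage sketch (illustrative):
#
#   x = Vector([1, 2, 3])
#   y = Vector([1, 1, 1])
#   print(axpy(2, x, y))        # 2*x + y -> (3,5,7)
#   m = Matrix([[1, 0], [0, 2]], 2, 2)
#   print(m * Vector([3, 4]))   # matrix-vector product -> (3,8)
#   print(m.determinant())      # 2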
| 682
| 1
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out


class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
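# Usage sketch (illustrative): chunk a memory-hungry layer over a flattened
# batch. The toy `layer` and tensor shapes below are assumptions for the demo.
def demo_chunk_layer():
    def layer(x):
        return {"out": x * 2}

    x = torch.ones(4, 8, 16)  # two batch dims (4, 8), feature dim 16
    # runs `layer` on 8-row slices of the flattened 32-row batch, then reshapes back
    out = chunk_layer(layer, {"x": x}, chunk_size=8, no_batch_dims=2)
    assert out["out"].shape == (4, 8, 16)
    return out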
| 78
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if (
(cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f)
or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) #
or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) #
or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) #
or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) #
or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f)
or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) #
): #
return True
return False
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for char in word:
__UpperCamelCase :str = ord(SCREAMING_SNAKE_CASE )
if not _is_chinese_char(SCREAMING_SNAKE_CASE ):
return 0
return 1
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = set()
for token in tokens:
__UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE ) > 1 and is_chinese(SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE )
__UpperCamelCase :int = list(SCREAMING_SNAKE_CASE )
return word_list
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
__UpperCamelCase :Dict = max([len(SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
__UpperCamelCase :str = bert_tokens
__UpperCamelCase , __UpperCamelCase :int = 0, len(SCREAMING_SNAKE_CASE )
while start < end:
__UpperCamelCase :Optional[int] = True
if is_chinese(bert_word[start] ):
__UpperCamelCase :Dict = min(end - start , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , 1 , -1 ):
__UpperCamelCase :int = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__UpperCamelCase :Union[str, Any] = '''##''' + bert_word[j]
__UpperCamelCase :Dict = start + i
__UpperCamelCase :Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
__UpperCamelCase :List[str] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
__UpperCamelCase :int = [get_chinese_word(SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
__UpperCamelCase :List[str] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = []
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :str = []
for id in input_ids:
__UpperCamelCase :Dict = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE )
input_tokens.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = add_sub_symbol(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
__UpperCamelCase :Union[str, Any] = token[2:]
# save chinese tokens' pos
if len(SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE ) ):
ref_id.append(SCREAMING_SNAKE_CASE )
ref_ids.append(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
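
# Illustrative check (an addition, not part of the original script): characters
# that continue an LTP whole word receive BERT's "##" continuation prefix, e.g.
#   add_sub_symbol(["中", "国", "人"], {"中国"})  # -> ["中", "##国", "人"]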
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
__lowercase = parser.parse_args()
main(args)
| 167
| 0
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root of unity used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the two polynomials: pointwise product of the DFTs, then inverse DFT
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's (high-order zero coefficients)
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
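
# Illustrative usage (an addition, not part of the original module): multiplying
# (1 + 2x + 3x^2) by (1 + x) gives 1 + 3x + 5x^2 + 3x^3.
if __name__ == "__main__":
    print(FFT([1, 2, 3], [1, 1]).product)  # ≈ [(1+0j), (3+0j), (5+0j), (3+0j)]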
| 598
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
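
# Usage sketch (an addition, not part of the original file):
#   config = CanineConfig()     # the defaults defined above
#   config.num_hash_buckets     # -> 16384
#   config.downsampling_rate    # -> 4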
| 598
| 1
|
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
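
# Worked example (an addition, not part of the original file): at a 10% discount
# rate, NPV = -100 + 50/1.1 + 60/1.1**2 ≈ -4.96, so the project destroys value
# even though nominal inflows (110) exceed the outlay (100).
if __name__ == "__main__":
    print(present_value(0.10, [-100, 50, 60]))  # -4.96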
| 40
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
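
# Minimal inference sketch (an addition, not part of the original tests; the
# sag_scale value here is illustrative):
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]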
| 540
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """A wrapper around `tqdm.auto.tqdm` that is disabled on non-main processes."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
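
# Usage sketch (an addition, not part of the original file): under a multi-process
# `accelerate launch`, only local rank 0 draws the progress bar; every other rank
# receives a disabled tqdm instance. Example (`dataloader` is a placeholder):
#   for batch in tqdm(dataloader):
#       ...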
| 143
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str):
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset
    def _convert_id_to_token(self, index: int):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
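
# Usage sketch (an addition, not part of the original file): sentencepiece ids
# are shifted up by `offset` (103) so ids 0..102 stay reserved for the special
# pad/eos/mask tokens defined in `self.encoder` above.
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   tok("Summarize this.").input_ids[-1]  # -> 1, the </s> id appended above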
| 143
| 1
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
_lowerCamelCase : Dict = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
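
# Example invocation (an addition; paths and script name are placeholders):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap.pt --pytorch_dump_folder_path ./clap-hf --enable_fusion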
| 403
|
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 cells surrounding the (i, j) cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
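
# Illustrative run (an addition, not part of the original file): the 1s below
# form two 8-connected islands.
if __name__ == "__main__":
    grid = [[1, 0, 0], [0, 0, 1], [0, 0, 1]]
    print(Matrix(3, 3, grid).count_islands())  # 2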
| 403
| 1
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
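
# Usage sketch (an addition, not part of the original file): this builder backs
# `load_dataset("parquet", ...)`, e.g. (the path is a placeholder):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/*.parquet"})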
| 705
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_var(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
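
# Usage sketch (an addition, not part of the original tests): one second of
# silence is padded out to Whisper's fixed 30 s window of log-mel features.
#   fe = WhisperFeatureExtractor()
#   fe([np.zeros(16000)], sampling_rate=16000, return_tensors="np").input_features.shape
#   # -> (1, 80, 3000): 80 mel bins, 3000 frames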
| 54
| 0
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
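
# Example invocation (an addition; the checkpoint path is a placeholder):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron_lm_345m_v0.0.zip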
| 506
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
_lowerCAmelCase :Tuple = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
| 506
| 1
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
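
# Illustrative run (an addition, not part of the original file): 0 marks an open
# cell and 1 a blocked one; the printed solution marks the travelled path with 1s.
if __name__ == "__main__":
    solve_maze([[0, 1], [0, 0]])  # prints [1, 0] then [1, 1]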
| 576
|
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('Enter number of vertices: ').strip())
lowerCAmelCase__ = int(input('Enter number of edges: ').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
lowerCAmelCase__ = {'src': src, 'dst': dest, 'weight': weight}
lowerCAmelCase__ = int(input('\nEnter shortest path source:').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
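
# Worked example (an addition, not part of the original file): with 3 vertices
# and edges 0->1 (weight 2) and 1->2 (weight 3),
#   bellman_ford([{"src": 0, "dst": 1, "weight": 2},
#                 {"src": 1, "dst": 2, "weight": 3}], 3, 2, 0)
# returns [0.0, 2.0, 5.0].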
| 576
| 1
|
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
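
# Worked check (an addition): arc_length(90, 10) = 2π·10·(90/360) = 5π ≈ 15.708.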
| 118
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def lowerCAmelCase__ ( __magic_name__ = "dhaka" , __magic_name__ = 5 ) ->int:
__lowercase = min(__magic_name__ , 5_0 ) # Prevent abuse!
__lowercase = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
__lowercase = requests.get("https://www.google.com/search" , params=__magic_name__ , headers=__magic_name__ )
__lowercase = BeautifulSoup(html.text , "html.parser" )
__lowercase = "".join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
__lowercase = json.dumps(__magic_name__ )
__lowercase = json.loads(__magic_name__ )
__lowercase = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , __magic_name__ , )
if not matched_google_image_data:
return 0
__lowercase = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(__magic_name__ ) , )
__lowercase = re.findall(
R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , __magic_name__ , )
for index, fixed_full_res_image in enumerate(__magic_name__ ):
if index >= max_images:
return index
__lowercase = bytes(__magic_name__ , "ascii" ).decode(
"unicode-escape" )
__lowercase = bytes(__magic_name__ , "ascii" ).decode(
"unicode-escape" )
__lowercase = urllib.request.build_opener()
__lowercase = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(__magic_name__ )
__lowercase = F'''query_{query.replace(' ' , '_' )}'''
if not os.path.exists(__magic_name__ ):
os.makedirs(__magic_name__ )
urllib.request.urlretrieve( # noqa: S310
__magic_name__ , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
_lowercase = download_images_from_google_query(sys.argv[1])
print(F"{image_count} images were downloaded to disk.")
except IndexError:
print('''Please provide a search term.''')
raise
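
# Example invocation (an addition; the script name is a placeholder):
#   python download_images_from_google_query.py "blue sky"
# saves up to 5 images under ./query_blue_sky/original_size_img_<i>.jpg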
| 118
| 1
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")
class UpperCamelCase__ :
def __init__(self : Dict ):
__a : Any | None = None
__a : Node | None = None
__a : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
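    # A quick demo of the queue above (a minimal sketch; dequeuing an empty
    # queue raises "Empty Queue", so enqueue first):
    queue = CircularQueueLinkedList(initial_capacity=2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"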
| 326
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


# We will verify our results on a video of eating spaghetti
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 326
| 1
|
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
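    # A quick demo of the class above (a minimal sketch):
    prefix_sum = PrefixSum([1, 2, 3])
    assert prefix_sum.get_sum(0, 2) == 6  # 1 + 2 + 3
    assert prefix_sum.get_sum(1, 2) == 5  # 2 + 3
    assert prefix_sum.contains_sum(5)  # the subarray [2, 3] sums to 5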
| 27
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """
    Check if a number is a perfect square.
    """
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """
    Add three fractions and return the numerator and denominator of the sum,
    reduced to lowest terms.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """
    Collect every unique reduced fraction produced by the n = 1, 2, -1 and -2
    cases below and return the numerator plus denominator of their total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 27
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    Build the tree
        1
       / \
      2   3
     / \
    4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """
    ZigZag traversal: visit nodes level by level, alternating the direction.
    """
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
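    # Expected output for the tree built by make_tree() (a minimal sketch):
    #   In-order:   [4, 2, 5, 1, 3]
    #   Pre-order:  [1, 2, 4, 5, 3]
    #   Post-order: [4, 5, 2, 3, 1]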
| 721
|
"""simple docstring"""
def sylvester(number: int) -> int:
    """
    Calculate the number-th term of Sylvester's sequence, defined by
    a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1.
    """
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 674
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
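

if __name__ == "__main__":
    # A quick sketch (not part of the original module): with the defaults above,
    # hidden_size = embed_dim * 2 ** (num_layers - 1) = 96 * 8 = 768.
    config = DonutSwinConfig()
    print(config.model_type, config.hidden_size)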
| 384
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """
    Returns True if the number is prime, False otherwise.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """
    Returns a list of the first n odd composite numbers which can not be
    written as the sum of a prime and twice a perfect square.
    """
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the first such odd composite number."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
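    # Sanity check (a minimal sketch): the smallest odd composite that cannot be
    # written as a prime plus twice a square is known to be 5777.
    assert solution() == 5777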
| 334
| 0
|
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide a byte range across the given number of partitions and return the
    resulting ranges as strings, e.g. "1-33".
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
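    # A quick demo (a minimal sketch): 100 bytes split across 3 partitions.
    print(allocation_num(100, 3))  # ['1-33', '34-66', '67-100']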
| 538
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # The position ids should be masked with the embedding object's padding
        # index, so the first non-padding position index is padding_idx + 1.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        # When input embeddings are passed directly, position ids are generated
        # sequentially starting right after the padding index.
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33

            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 538
| 1
|
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
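    print(reverse_long_words('olleH dlrow'))  # a second quick check: -> 'Hello world'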
| 653
|
"""simple docstring"""
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        chkpt_base = [mem.copy() for i in range(6)]
        chkpt_rect = VGroup(*chkpt_base).arrange(RIGHT, buff=0)
        chkpt_text = Text('Loaded Checkpoint', font_size=24)
        checkpoint = Group(chkpt_rect, chkpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(chkpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model', font_size=18, )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)
        blue_text = MarkupText(
            F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint', font_size=18, )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)
        step_6 = MarkupText(
            F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.', font_size=24, )
        step_6.move_to([2, 2, 0])
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text('Disk', font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_6, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))
        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)
        self.play(FadeOut(step_6))
        step_7 = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.', font_size=24)
        step_7.move_to([2, 2, 0])
        self.play(Write(step_7, run_time=3))
        self.play(
            FadeOut(chkpt_rect, chkpt_text, *ckpt_arr, *ckpt_cpu_arr), )
        self.wait()
| 247
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the checkpoint's weights to our FocalNet structure."""
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''')
        model.push_to_hub(f'''{model_name}''')
        processor.push_to_hub(f'''{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 568
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
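

# Example usage via torch.hub (a minimal sketch; the model id is illustrative and
# the repo name assumes the published 'huggingface/pytorch-transformers' entry point):
#   model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")
#   tokenizer = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")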
| 568
| 1
|
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
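# A minimal launch sketch, not part of the original script (the script file name and all
# paths are assumptions; any standard `Trainer` flag can be appended):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file train_ref.txt \
#       --do_train --output_dir ./mlm-wwm-out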
| 186
|
"""Project Euler problem 77: find the first value that can be written as the sum of primes in over 5000 ways."""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return one product of primes per distinct way to write `number_to_partition` as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that has more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
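# A minimal usage sketch, not part of the original file (the checkpoint id and the `image`
# variable are assumptions; with the image processor's default apply_ocr=True, words and
# boxes are extracted by OCR automatically):
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")
#   # encoding holds input_ids, attention_mask, bbox and pixel_values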
| 468
|
"""simple docstring"""
def _lowerCAmelCase ( __lowerCamelCase:int ):
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 468
| 1
|
"""Longest path length in a directed acyclic graph, via Kahn's topological ordering."""


def longest_distance(graph):
    """Print the number of vertices on a longest path in the DAG `graph` (adjacency list)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
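# For the adjacency list above this prints 5: the longest path, e.g. 0 -> 2 -> 5 -> 6 -> 7,
# visits five vertices.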
| 270
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
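# Note (added): with the lazy module installed in sys.modules, `BertweetTokenizer` is only
# actually imported from `tokenization_bertweet` on first attribute access.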
| 221
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
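# A minimal launch sketch, not part of the original script (the file name is an assumption
# and `accelerate config` is expected to have been run beforehand):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16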
| 711
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Arguments for sequence-to-sequence training, extending `TrainingArguments` with generation options.
    """

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance while replacing `GenerationConfig` members with their dictionary representation.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
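# A minimal usage sketch, not part of the original file (`output_dir` is a hypothetical path):
#
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_max_length=128)
#   d = args.to_dict()  # any GenerationConfig values are serialized to plain dicts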
| 682
| 0