| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|

Column types and ranges: `code` is a string (lengths 82 to 53.2k characters), `code_codestyle` is an int64 in [0, 721], `style_context` is a string (lengths 91 to 41.9k characters), `style_context_codestyle` is an int64 in [0, 699], and `label` is an int64 in {0, 1}. Each row below lists the `code` snippet, its codestyle id, the `style_context` snippet, its codestyle id, and the label.
import warnings

warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
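# Hedged sketch (not part of the shim above): the replacement import that the
# warning recommends. `find_executable_batch_size` retries the decorated
# function with a halved batch size whenever it hits a CUDA out-of-memory
# error; the print body below is illustrative only.
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"training with batch size {batch_size}")

train()  # the wrapper injects batch_size, starting at 128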
| code_codestyle: 250 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 250 | label: 1 |
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| code_codestyle: 320 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertex in vertices:
            if vertex not in visited:
                sort = topological_sort(vertex, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| style_context_codestyle: 320 | label: 1 |
import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read a big-endian uint32 from a byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for a set of images and labels with epoch-based batching."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`. Seed arg provides for convenient
        deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and parse MNIST into train/validation/test sets."""
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
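# Hedged usage sketch (not part of the original module): download MNIST into a
# local directory and draw one shuffled batch. The "/tmp/mnist" path is
# illustrative only, and the first run needs network access.
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist", one_hot=True)
    images, labels = mnist.train.next_batch(100)
    print(images.shape, labels.shape)  # (100, 784) (100, 10)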
| code_codestyle: 87 |
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    """Track peak resident CPU memory on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (deltas in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| style_context_codestyle: 295 | label: 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| code_codestyle: 703 |
import inspect
import unittest

from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| style_context_codestyle: 473 | label: 0 |
from __future__ import annotations


def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    """Return the first index in v[left..right] whose value is >= key (v sorted)."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v.

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([5, 4, 3, 2, 1])
    1
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0
    # tail[i] holds the smallest possible tail of an increasing subsequence of length i + 1
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh length-1 subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element to keep tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| code_codestyle: 432 |
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| style_context_codestyle: 432 | label: 1 |
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: int ):
"""simple docstring"""
A__ = 0
A__ = 0
A__ = {}
def UpperCamelCase ( self: int , UpperCamelCase: Any ):
"""simple docstring"""
if vertex not in self.adjacency:
A__ = {}
self.num_vertices += 1
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: List[Any] , UpperCamelCase: List[Any] ):
"""simple docstring"""
self.add_vertex(UpperCamelCase )
self.add_vertex(UpperCamelCase )
if head == tail:
return
A__ = weight
A__ = weight
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.get_edges()
for edge in edges:
A__ , A__ , A__ = edge
edges.remove((tail, head, weight) )
for i in range(len(UpperCamelCase ) ):
A__ = list(edges[i] )
edges.sort(key=lambda UpperCamelCase : e[2] )
for i in range(len(UpperCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A__ = edges[i][2] + 1
for edge in edges:
A__ , A__ , A__ = edge
A__ = weight
A__ = weight
def __str__( self: Any ):
"""simple docstring"""
A__ = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A__ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip("""\n""" )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def UpperCamelCase ( UpperCamelCase: str=None , UpperCamelCase: Union[str, Any]=None ):
"""simple docstring"""
A__ = Graph()
if vertices is None:
A__ = []
if edges is None:
A__ = []
for vertex in vertices:
g.add_vertex(UpperCamelCase )
for edge in edges:
g.add_edge(*UpperCamelCase )
return g
class a :
"""simple docstring"""
def __init__( self: int ):
"""simple docstring"""
A__ = {}
A__ = {}
def __len__( self: Dict ):
"""simple docstring"""
return len(self.parent )
def UpperCamelCase ( self: int , UpperCamelCase: Any ):
"""simple docstring"""
if item in self.parent:
return self.find(UpperCamelCase )
A__ = item
A__ = 0
return item
def UpperCamelCase ( self: int , UpperCamelCase: Any ):
"""simple docstring"""
if item not in self.parent:
return self.make_set(UpperCamelCase )
if item != self.parent[item]:
A__ = self.find(self.parent[item] )
return self.parent[item]
def UpperCamelCase ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: str ):
"""simple docstring"""
A__ = self.find(UpperCamelCase )
A__ = self.find(UpperCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A__ = roota
return roota
if self.rank[roota] < self.rank[roota]:
A__ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A__ = roota
return roota
return None
@staticmethod
def UpperCamelCase ( UpperCamelCase: Any ):
"""simple docstring"""
A__ = graph.num_vertices
A__ = Graph.UnionFind()
A__ = []
while num_components > 1:
A__ = {}
for vertex in graph.get_vertices():
A__ = -1
A__ = graph.get_edges()
for edge in edges:
A__ , A__ , A__ = edge
edges.remove((tail, head, weight) )
for edge in edges:
A__ , A__ , A__ = edge
A__ = union_find.find(UpperCamelCase )
A__ = union_find.find(UpperCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A__ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A__ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A__ , A__ , A__ = cheap_edge[vertex]
if union_find.find(UpperCamelCase ) != union_find.find(UpperCamelCase ):
union_find.union(UpperCamelCase , UpperCamelCase )
mst_edges.append(cheap_edge[vertex] )
A__ = num_components - 1
A__ = Graph.build(edges=UpperCamelCase )
return mst
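# Hedged usage sketch (not part of the original file): run Boruvka's algorithm
# on a small weighted graph. The edge list is illustrative only.
if __name__ == "__main__":
    g = Graph.build(edges=[["a", "b", 1], ["b", "c", 2], ["a", "c", 3], ["c", "d", 4]])
    g.distinct_weight()  # Boruvka's algorithm assumes distinct edge weights
    print(Graph.boruvka_mst(g))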
| code_codestyle: 701 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "segformer"
def __init__( self: Optional[Any] , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: int=4 , UpperCamelCase: int=[2, 2, 2, 2] , UpperCamelCase: List[str]=[8, 4, 2, 1] , UpperCamelCase: List[Any]=[32, 64, 1_60, 2_56] , UpperCamelCase: Any=[7, 3, 3, 3] , UpperCamelCase: Union[str, Any]=[4, 2, 2, 2] , UpperCamelCase: Tuple=[1, 2, 5, 8] , UpperCamelCase: Optional[int]=[4, 4, 4, 4] , UpperCamelCase: Dict="gelu" , UpperCamelCase: Optional[Any]=0.0 , UpperCamelCase: List[str]=0.0 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: int=0.1 , UpperCamelCase: Optional[int]=1e-6 , UpperCamelCase: int=2_56 , UpperCamelCase: Union[str, Any]=2_55 , **UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase , )
A__ = num_channels
A__ = num_encoder_blocks
A__ = depths
A__ = sr_ratios
A__ = hidden_sizes
A__ = patch_sizes
A__ = strides
A__ = mlp_ratios
A__ = num_attention_heads
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = classifier_dropout_prob
A__ = initializer_range
A__ = drop_path_rate
A__ = layer_norm_eps
A__ = decoder_hidden_size
A__ = kwargs.get("""reshape_last_stage""" , UpperCamelCase )
A__ = semantic_loss_ignore_index
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("1.11" )
@property
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
return 1e-4
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return 12
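# Hedged usage sketch (not part of the original file): build a default config
# and inspect the ONNX export metadata declared above.
if __name__ == "__main__":
    config = SegformerConfig()
    onnx_config = SegformerOnnxConfig(config)
    print(config.model_type)  # "segformer"
    print(dict(onnx_config.inputs))  # {"pixel_values": {0: "batch", ...}}
    print(onnx_config.default_onnx_opset)  # 12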
| style_context_codestyle: 500 | label: 0 |
"""simple docstring"""
import argparse
import os
import re
snake_case_ : List[str] = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
snake_case_ : List[str] = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
snake_case_ : Optional[Any] = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def lowercase_ ( _lowercase : Tuple , _lowercase : Any = False ):
'''simple docstring'''
with open(a_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase : List[str] = f.read()
UpperCAmelCase : int = content.split("\n" )
UpperCAmelCase : str = []
UpperCAmelCase : Union[str, Any] = 0
while line_idx < len(a_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
UpperCAmelCase : Union[str, Any] = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
UpperCAmelCase : Union[str, Any] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
UpperCAmelCase : List[str] = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
UpperCAmelCase : int = sorted(a_ , key=lambda _lowercase : _re_identifier.search(a_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(a_ , "w" , encoding="utf-8" ) as f:
f.write("\n".join(a_ ) )
elif "\n".join(a_ ) != content:
return True
def lowercase_ ( _lowercase : Dict = False ):
'''simple docstring'''
UpperCAmelCase : Dict = [os.path.join(a_ , a_ ) for f in os.listdir(a_ ) if f.endswith(".py" )]
UpperCAmelCase : Dict = [sort_auto_mapping(a_ , overwrite=a_ ) for fname in fnames]
if not overwrite and any(a_ ):
UpperCAmelCase : Optional[Any] = [f for f, d in zip(a_ , a_ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(a_ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
snake_case_ : int = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| code_codestyle: 595 |
def lucas_lehmer_test(p: int) -> bool:
    """
    Test whether the Mersenne number 2^p - 1 is prime, using the Lucas-Lehmer test.

    >>> lucas_lehmer_test(p=7)
    True
    >>> lucas_lehmer_test(p=11)
    False
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| style_context_codestyle: 251 | label: 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a ={
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| code_codestyle: 705 |
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase ) -> str:
'''simple docstring'''
lowerCamelCase__ =[]
lowerCamelCase__ =[]
lowerCamelCase__ ={
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
lowerCamelCase__ =len(__lowerCAmelCase ) if (len(__lowerCAmelCase ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(__lowerCAmelCase ) , "Postfix".center(__lowerCAmelCase ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__lowerCAmelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__lowerCAmelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__lowerCAmelCase ) == 0:
stack.append(__lowerCAmelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__lowerCAmelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__lowerCAmelCase ) # push x to stack
print(
x.center(8 ) , ("".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=" | " , ) # Output in tabular format
while len(__lowerCAmelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=" | " , ) # Output in tabular format
return "".join(__lowerCAmelCase ) # return Postfix as str
def lowerCamelCase_ ( __lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowerCamelCase__ =list(infix[::-1] ) # reverse the infix equation
for i in range(len(__lowerCAmelCase ) ):
if infix[i] == "(":
lowerCamelCase__ =")" # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ ="(" # change ")" to "("
return (infix_2_postfix("".join(__lowerCAmelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
a =input('\nEnter an Infix Equation = ') # Input an Infix equation
a =''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| style_context_codestyle: 132 | label: 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _lowerCAmelCase() -> List[Any]:
_SCREAMING_SNAKE_CASE =ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
_SCREAMING_SNAKE_CASE =parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(lowerCAmelCase__ )
DownloadCommand.register_subcommand(lowerCAmelCase__ )
EnvironmentCommand.register_subcommand(lowerCAmelCase__ )
RunCommand.register_subcommand(lowerCAmelCase__ )
ServeCommand.register_subcommand(lowerCAmelCase__ )
UserCommands.register_subcommand(lowerCAmelCase__ )
AddNewModelCommand.register_subcommand(lowerCAmelCase__ )
AddNewModelLikeCommand.register_subcommand(lowerCAmelCase__ )
LfsCommands.register_subcommand(lowerCAmelCase__ )
PTtoTFCommand.register_subcommand(lowerCAmelCase__ )
# Let's go
_SCREAMING_SNAKE_CASE =parser.parse_args()
if not hasattr(lowerCAmelCase__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
_SCREAMING_SNAKE_CASE =args.func(lowerCAmelCase__ )
service.run()
if __name__ == "__main__":
main()
| code_codestyle: 255 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 695
| 0
|
"""simple docstring"""
def remove_digit(num: int) -> int:
    """Return the largest number that can be formed by deleting one digit of num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
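    # Quick usage examples (added illustration; each candidate below drops
    # exactly one digit of the input):
    # remove_digit(152)  -> 52  (candidates: 52, 12, 15)
    # remove_digit(-290) -> 90  (the sign is dropped via abs(); candidates: 90, 20, 29)
    print(remove_digit(152))  # 52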
| 632
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ : int = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Union[str, Any] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[Any] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 632
| 1
|
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
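if __name__ == "__main__":
    # Illustrative sanity check for intersect_and_union above (added example):
    # two 2x2 maps, two classes, nothing ignored. The expected arrays follow
    # from the histogram arithmetic in the function, giving per-category
    # IoU = area_intersect / area_union = [0.5, 2/3].
    _pred = np.array([[0, 1], [1, 1]])
    _label = np.array([[0, 1], [0, 1]])
    _ai, _au, _ap, _al = intersect_and_union(_pred, _label, num_labels=2, ignore_index=255)
    assert _ai.tolist() == [1, 2] and _au.tolist() == [2, 3]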
| 402
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
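# Standalone sketch of what _pad_tensors_to_max_len does (added illustration,
# independent of the Trainer machinery above):
#
#     import torch
#     t = torch.tensor([[5, 6, 7]])
#     padded = 0 * torch.ones((t.shape[0], 5), dtype=t.dtype)
#     padded[:, : t.shape[-1]] = t
#     # padded is tensor([[5, 6, 7, 0, 0]]) -- right-padded with pad_token_id (here 0)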
| 402
| 1
|
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
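    # A few more illustrative cases for match_pattern (added examples; the
    # expected results follow from standard regex semantics of "." and "*"):
    # match_pattern("aa", "a")                    -> False
    # match_pattern("aa", "a*")                   -> True
    # match_pattern("ab", ".*")                   -> True
    # match_pattern("mississippi", "mis*is*p*.")  -> False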
| 712
|
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
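    # Illustrative usage of neville_interpolate above (added example): the
    # sample points lie on the line y = x + 5, so interpolating at x0 = 5
    # recovers exactly 10.0. The second return element is the Neville tableau.
    #
    #     value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    #     assert value == 10.0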
| 363
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_model, key, value, full_name, weight_type):
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
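    # Example invocation (added illustration; the script name, checkpoint and
    # output paths below are placeholders, not real files):
    #
    #     python convert_speecht5_checkpoint.py \
    #         --task s2t \
    #         --checkpoint_path ./speecht5_asr.pt \
    #         --pytorch_dump_folder_path ./speecht5_asr_converted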
| 372
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.4785_3944, 0.473_2864, 0.4743_4163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1_000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
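    # Example invocation (added illustration; the output directory is a
    # placeholder):
    #
    #     python convert_efficientnet_to_pytorch.py --model_name b0 \
    #         --pytorch_dump_folder_path ./efficientnet-b0 --save_model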
| 264
| 0
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that a pip-style requirement (e.g. "tokenizers==0.9.4") is satisfied."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
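# Usage examples (added illustration; these mirror the pip-style requirement
# format the parser above accepts):
#
#     require_version("numpy")                              # only check presence
#     require_version("torch>=1.9.0")                       # single version bound
#     require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # multiple bounds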
| 720
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ImageTextProcessor(ProcessorMixin):
    # NOTE: the original class name was lost in this snippet; "ImageTextProcessor"
    # is a stand-in. The attributes below follow the standard ProcessorMixin layout.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
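# Illustrative usage sketch (added example; the checkpoint name is a
# placeholder -- any repo shipping both an image processor and a tokenizer
# config would work):
#
#     from transformers import AutoImageProcessor, AutoTokenizer
#
#     image_processor = AutoImageProcessor.from_pretrained("some/checkpoint")
#     tokenizer = AutoTokenizer.from_pretrained("some/checkpoint")
#     processor = ImageTextProcessor(image_processor, tokenizer)
#     batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#     # batch contains input_ids, attention_mask and pixel_values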
| 199
| 0
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
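# Minimal round-trip sketch of the API exercised by the tests above, using the
# imports already at the top of this file (the layer shape is an illustrative
# assumption):
if __name__ == "__main__":
    net = nn.Linear(3, 4)
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, net.state_dict())
        loader = OffloadedWeightsLoader(save_folder=tmp_dir)
        # Tensors are memory-mapped back from disk on access.
        print(loader["weight"].shape)  # torch.Size([4, 3])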
| 493
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input"
            )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`.")
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
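# Illustrative end-to-end usage from user code (the checkpoint name is an
# assumption; any fill-mask model works):
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="distilroberta-base")
#   for prediction in fill_mask("The capital of France is <mask>.", top_k=3):
#       print(prediction["token_str"], round(prediction["score"], 3))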
| 171
| 0
|
'''simple docstring'''
def max_product(numbers) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
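# Quick worked check of the algorithm: for [2, 3, -2, 4] the best contiguous
# product is 2 * 3 = 6, and a zero resets the running products, so [-2, 0, -1]
# yields 0.
if __name__ == "__main__":
    print(max_product([2, 3, -2, 4]))  # 6
    print(max_product([-2, 0, -1]))  # 0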
| 720
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict."""
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
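# Example invocation (the script filename and all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin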
| 483
| 0
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) ,homepage='''https://github.com/openai/human-eval''' ,codebase_urls=['''https://github.com/openai/human-eval'''] ,reference_urls=['''https://github.com/openai/human-eval'''] ,license=_LICENSE ,)
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
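# Worked example of the unbiased estimator above: with n = 5 samples of which
# c = 2 are correct, the probability that a random draw of k = 2 contains at
# least one correct sample is 1 - (3/5) * (2/4) = 0.7.
if __name__ == "__main__":
    n, c, k = 5, 2, 2
    print(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))  # 0.7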
| 41
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
while i <= 100_000:
print(F"With {i} steps: {line_length(f, -10, 10, i)}")
i *= 10
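    # Sanity check (illustrative): for the straight line g(x) = x on [0, 1]
    # the exact length is sqrt(2), and the piecewise-linear sum is exact for
    # any positive step count.
    print(line_length(lambda x: x, 0, 1, 10))  # 1.4142135623730951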
| 190
| 0
|
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[Any] = DownBlockaD # noqa F405
UpperCAmelCase_ :Any = "down"
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[str] = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[int] = ResnetDownsampleBlockaD # noqa F405
UpperCAmelCase_ :int = "down"
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = AttnDownBlockaD # noqa F405
UpperCAmelCase_ :Any = "down"
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[str] = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Tuple = CrossAttnDownBlockaD # noqa F405
UpperCAmelCase_ :Tuple = "down"
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ :Tuple = 32
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Tuple = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[Any] = SimpleCrossAttnDownBlockaD # noqa F405
UpperCAmelCase_ :Any = "down"
@property
def __lowerCAmelCase ( self ) -> List[str]:
return super().get_dummy_input(include_encoder_hidden_states=__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Any = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ :List[str] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[Any] = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :int = SkipDownBlockaD # noqa F405
UpperCAmelCase_ :str = "down"
@property
def __lowerCAmelCase ( self ) -> Any:
return super().get_dummy_input(include_skip_sample=__A )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :int = AttnSkipDownBlockaD # noqa F405
UpperCAmelCase_ :Optional[int] = "down"
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return super().get_dummy_input(include_skip_sample=__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Dict = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Tuple = DownEncoderBlockaD # noqa F405
UpperCAmelCase_ :List[Any] = "down"
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return super().get_dummy_input(include_temb=__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
lowerCAmelCase_ :Any = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[Any] = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[Any] = AttnDownEncoderBlockaD # noqa F405
UpperCAmelCase_ :Union[str, Any] = "down"
@property
def __lowerCAmelCase ( self ) -> str:
return super().get_dummy_input(include_temb=__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
lowerCAmelCase_ :List[str] = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[int] = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Dict = UNetMidBlockaD # noqa F405
UpperCAmelCase_ :Optional[int] = "mid"
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
lowerCAmelCase_ :Dict = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Tuple = UNetMidBlockaDCrossAttn # noqa F405
UpperCAmelCase_ :List[Any] = "mid"
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ , lowerCAmelCase_ :Any = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ :Tuple = 32
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :str = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
UpperCAmelCase_ :str = "mid"
@property
def __lowerCAmelCase ( self ) -> Dict:
return super().get_dummy_input(include_encoder_hidden_states=__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ :Optional[Any] = 32
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = UpBlockaD # noqa F405
UpperCAmelCase_ :List[Any] = "up"
@property
def __lowerCAmelCase ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=__A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Dict = ResnetUpsampleBlockaD # noqa F405
UpperCAmelCase_ :Any = "up"
@property
def __lowerCAmelCase ( self ) -> int:
return super().get_dummy_input(include_res_hidden_states_tuple=__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :str = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Tuple = CrossAttnUpBlockaD # noqa F405
UpperCAmelCase_ :Optional[int] = "up"
@property
def __lowerCAmelCase ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ :int = 32
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Optional[int] = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = SimpleCrossAttnUpBlockaD # noqa F405
UpperCAmelCase_ :Optional[int] = "up"
@property
def __lowerCAmelCase ( self ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=__A , include_encoder_hidden_states=__A )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ :Any = 32
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Dict = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = AttnUpBlockaD # noqa F405
UpperCAmelCase_ :Union[str, Any] = "up"
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Dict = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[int] = SkipUpBlockaD # noqa F405
UpperCAmelCase_ :Optional[int] = "up"
@property
def __lowerCAmelCase ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=__A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Union[str, Any] = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = AttnSkipUpBlockaD # noqa F405
UpperCAmelCase_ :List[Any] = "up"
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[int] = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :int = UpDecoderBlockaD # noqa F405
UpperCAmelCase_ :str = "up"
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return super().get_dummy_input(include_temb=__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :List[str] = {"""in_channels""": 32, """out_channels""": 32}
lowerCAmelCase_ :List[str] = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7]
super().test_output(__A )
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[Any] = AttnUpDecoderBlockaD # noqa F405
UpperCAmelCase_ :Tuple = "up"
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return super().get_dummy_input(include_temb=__A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Tuple = {"""in_channels""": 32, """out_channels""": 32}
lowerCAmelCase_ :int = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8]
super().test_output(__A )
| 256
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
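# Illustrative instantiation from user code (the override value is arbitrary):
#   from transformers import BertGenerationConfig
#   config = BertGenerationConfig(hidden_dropout_prob=0.2)
#   config.model_type            # "bert-generation"
#   config.hidden_dropout_prob   # 0.2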
| 256
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict."""
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 480
|
import sys
import turtle
def get_mid(p1, p2):
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 73
| 0
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule() -> None:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin() -> None:
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing() -> None:
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin() -> None:
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop() -> None:
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive() -> None:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist() -> None:
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
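# Condensed illustration of the two usage styles exercised above: the patcher
# works both as a context manager and via explicit start()/stop().
def demo_patch_submodule_usage() -> None:
    mock = "__demo_mock__"
    patcher = patch_submodule(_test_patching, "os.path.join", mock)
    patcher.start()
    assert _test_patching.os.path.join is mock
    patcher.stop()
    assert _test_patching.os.path.join is not mock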
| 702
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration.")
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration.")

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
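# Illustrative entry point (run from user code, not from inside this module;
# the checkpoint name is an assumption):
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   type(image_processor).__name__  # "ViTImageProcessor"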
| 612
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 607
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__(self, other) -> bool:
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
    def __repr__(self) -> str:
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
output += f'''{name} >> {text} \n'''
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
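# Usage sketch (added for illustration; assumes the standard `transformers` entry points
# and a conversational checkpoint such as DialoGPT - the model name is a placeholder):
# from transformers import Conversation, pipeline
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
# conversation = Conversation("What is a good first Python project?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])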
| 607
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
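# Usage sketch (added for illustration): composing a config from two sub-model configs.
# The BERT checkpoint name below is a placeholder for any model with an AutoConfig entry.
if __name__ == "__main__":
    from transformers import AutoConfig

    encoder_config = AutoConfig.from_pretrained("bert-base-uncased")
    decoder_config = AutoConfig.from_pretrained("bert-base-uncased")
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
    print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True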
| 707
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS;
    it terminates when it reaches the end of the given sequence.
    A returning variant is sketched after this block.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
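# Added for illustration: the same backtracking scheme, but returning the permutations
# instead of printing them. `all_permutations` is not part of the original module.
def all_permutations(sequence: list[int | str]) -> list[list[int | str]]:
    results: list[list[int | str]] = []

    def backtrack(current: list[int | str], used: list[bool]) -> None:
        if len(current) == len(sequence):
            results.append(current.copy())  # record a completed branch of the state space tree
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                current.pop()  # undo the choice before trying the next sibling branch
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results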
| 253
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 82
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so that the file can be parsed when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
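# Usage sketch (added for illustration; mirrors the calls exercised above):
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="path/to/image.png", question="How many cats are there?", top_k=2)
# -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]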
| 250
| 0
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
      title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
      author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
            and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
            Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
            Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
            Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
            and Jeffrey Dean},
      year={2016},
      eprint={1609.08144},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3 (same hypotheses and references as Example 2, restricting the n-gram order):
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4 (same hypotheses and references as Example 2, widening the n-gram range):
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 721
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        # (sic: "vaccum" is kept as-is; the expected scores were recorded with these exact labels)
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )
@unittest.skip("No models are available in TF" )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
@unittest.skip("No models are available in TF" )
    def test_large_model_tf(self):
        pass
| 26
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 470
|
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 470
| 1
|
"""simple docstring"""
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """The parameters are saved and reused every time next_number() is called."""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Generate the next number in [0, modulo)."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
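# Determinism note (added): with fixed parameters and a fixed seed the generator always
# reproduces the same stream, e.g. LinearCongruentialGenerator(1_664_525, 1_013_904_223,
# 2 << 31, seed=0).next_number() == 1_013_904_223.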
| 91
|
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
UpperCamelCase : Optional[Any] = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` characters."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Flip `if 0` to `if 1` to emulate runs with random metrics (useful for debugging the report generation)
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 91
| 1
|
import os
def solution():
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
if __name__ == "__main__":
print(solution())
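# Worked example (added, from the problem statement): COLIN has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53; as the 938th name in sorted order it scores 938 * 53 = 49714.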
| 6
|
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(F'''{solution() = }''')
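# Quick check (added, from the problem statement): solution(13195) == 29,
# since 13195 = 5 * 7 * 13 * 29.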
| 6
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
def solution(n=600_851_475_143):
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 565
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
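# Usage sketch (added for illustration; assumes "train.pkl" holds a pickled pandas DataFrame):
# from datasets import load_dataset
# ds = load_dataset("pandas", data_files={"train": "train.pkl"})
# print(ds["train"].features)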
| 42
|
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """
    :param example_no: example number whose error has to be checked
    :param data_set: "train" or "test" set
    :return: error in the example pointed to by the example number
    """
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Calculates the hypothesis function value for a given input."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """:return: the output (y value) of the example at the given index in the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Calculates the hypothesis value for a given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Calculates the sum in the cost-function derivative; index -1 corresponds to the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
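# Background note (added for clarity): run_gradient_descent() performs batch gradient
# descent on h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3, applying the update
#     theta_i <- theta_i - LEARNING_RATE * (1/m) * sum_j error(j) * x_j[i]
# to all parameters simultaneously until successive parameter vectors agree within the
# tolerances set above.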
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 57
| 0
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
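# Derived-property sketch (added for illustration, using the 24 kHz defaults):
if __name__ == "__main__":
    config = EncodecConfig()
    print(config.frame_rate)  # ceil(24000 / prod([8, 5, 4, 2])) = 75
    print(config.num_quantizers)  # int(1000 * 24.0 // (75 * 10)) = 32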
| 706
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """
        Segment Tree constructor, it works just with commutative combiner.
        :param arr: list of elements for the segment tree
        :param fnc: commutative function for combining two elements
        """
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # the data proper starts at index N
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """
        Update an element in log(N) time.
        :param p: position to be updated
        :param v: new value
        """
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """
        Get range query value in log(N) time.
        :param l: left element index
        :param r: right element index
        :return: the elements of [l, r] combined with `fn`
        """
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
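# Complexity note (added): build() visits each internal node once (O(n)); update() and
# query() halve the node index on every loop iteration, so both run in O(log n).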
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test all possible segments against a reduce-based reference."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 374
| 0
|
'''simple docstring'''
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if len(infix) > 7 else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return ("".join(infix_2_postfix("".join(infix))))[::-1]  # run infix_2_postfix on the reversed Infix, then reverse
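# Worked example (added): for the infix "a+b*c" the reversed, bracket-swapped string is
# "c*b+a"; its postfix form is "cb*a+", and reversing that gives the prefix form "+a*bc".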
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 51
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =LEDConfig
_lowerCamelCase ={}
_lowerCamelCase ="gelu"
def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ )
UpperCAmelCase = tf.concat(
[tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , )
UpperCAmelCase = global_attention_mask
return config, inputs_dict
def __snake_case ( self : Optional[int] , a__ : List[str] , a__ : int ):
UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ )
UpperCAmelCase, UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(a__ , attention_mask=a__ )[0]
UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ , a__ , rtol=1e-3 )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=None , ) -> Dict:
"""simple docstring"""
if attention_mask is None:
        UpperCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
UpperCAmelCase = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase =(
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase =True
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = TFLEDModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ )
def __snake_case ( self : int ):
self.config_tester.run_common_tests()
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] )
UpperCAmelCase = 2
UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
UpperCAmelCase = True
UpperCAmelCase = self.model_tester.seq_length
UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ : Tuple ):
UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ : int ):
UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __snake_case ( self : Any ):
pass
def __snake_case ( self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
"""simple docstring"""
    return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.int32 )
a__ : int = 1e-4
@slow
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, 768)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 )
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
| 51
| 1
|
def count_set_bits(number: int) -> int:
    # type must be checked before comparing, and the isinstance test was inverted
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
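    # Illustrative spot checks (added; not in the original file):
    # bin(25) is "0b11001", so 25 has three set bits, and 0 has none.
    assert count_set_bits(25) == 3
    assert count_set_bits(0) == 0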
| 313
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
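# A minimal usage sketch (added for illustration; not part of the original
# config module): with the defaults above, a 224x224 image cut into 16x16
# patches yields (224 // 16) ** 2 == 196 patch embeddings, plus one [CLS]
# token, for a sequence length of 197.
if __name__ == "__main__":
    config = ViTConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(num_patches, num_patches + 1)  # 196 197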
| 313
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : str = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
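# Usage sketch (added; not in the original file): projection_dim=0 keeps the
# encoder output at hidden_size, while a non-zero value requests an extra
# projection layer of that width on top of the encoder.
if __name__ == "__main__":
    config = DPRConfig(projection_dim=128)
    print(config.hidden_size, config.projection_dim)  # 768 128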
| 22
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 22
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : str = False
def __A ( a_ : Namespace )-> Tuple:
'''simple docstring'''
return TrainCommand(a_ )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@staticmethod
def __lowerCAmelCase ( lowerCamelCase_ :ArgumentParser ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=lowerCamelCase_ , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=lowerCamelCase_ , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=lowerCamelCase_ , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=lowerCamelCase_ , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=lowerCamelCase_ , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=lowerCamelCase_ , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=lowerCamelCase_ , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=lowerCamelCase_ , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=lowerCamelCase_ , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=lowerCamelCase_ , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=lowerCamelCase_ , default=3E-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=lowerCamelCase_ , default=1E-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self :Union[str, Any] , lowerCamelCase_ :Namespace ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE : Optional[int] = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = args.output
SCREAMING_SNAKE_CASE : List[str] = args.column_label
SCREAMING_SNAKE_CASE : Dict = args.column_text
SCREAMING_SNAKE_CASE : Dict = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE : Dict = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE : Any = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE : Dict = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE : List[Any] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE : Any = args.validation_split
SCREAMING_SNAKE_CASE : Union[str, Any] = args.train_batch_size
SCREAMING_SNAKE_CASE : List[Any] = args.valid_batch_size
SCREAMING_SNAKE_CASE : List[str] = args.learning_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = args.adam_epsilon
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
raise NotImplementedError
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 18
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18
| 1
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
    def _compute(self, predictions, references):
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 665
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
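# Note (added for illustration): outside the TYPE_CHECKING branch nothing here
# imports torch or vision dependencies eagerly; the _LazyModule only loads a
# submodule the first time one of its attributes (e.g. YolosConfig) is accessed.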
| 665
| 1
|
"""simple docstring"""
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """simple docstring"""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        """simple docstring"""
        return pformat(self.adj_list)
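# Usage sketch (added; not part of the original module): build a small
# undirected graph and inspect its adjacency list via __repr__.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)
    print(graph)  # {1: [2], 2: [1, 3], 3: [2]}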
| 707
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
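# Typical invocations (illustrative; the exact subcommand strings are defined
# by the command classes registered above, so treat these as assumptions):
#   transformers-cli env
#   transformers-cli download bert-base-uncased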
| 439
| 0
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ : str = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ : List[str] = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
return AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , *a__ : str = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ , *a__ : Tuple = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=_SCREAMING_SNAKE_CASE )
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=_SCREAMING_SNAKE_CASE )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=_SCREAMING_SNAKE_CASE , d=_SCREAMING_SNAKE_CASE )
| 112
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # swap the payloads rather than relinking the nodes
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 185
| 0
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
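# Usage sketch (added; not part of the original module): hide the cursor for
# the duration of a short render loop.
if __name__ == "__main__":
    import time

    with hide():
        print("cursor hidden while this prints...")
        time.sleep(0.5)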
| 377
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    '''simple docstring'''
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
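# Example invocation (illustrative; the script filename and paths below are
# placeholders, not taken from this file):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./bert-tf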
| 377
| 1
|
def odd_even_sort(input_list: list) -> list:
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
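# Note (added): odd-even (brick) sort is a bubble-sort variant that alternates
# compare-swaps over even and odd index pairs, which makes each phase trivially
# parallelizable; worst-case work is still O(n^2).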
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 80
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
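

# Hedged note (not part of the test file): the fast test above can be run in
# isolation with pytest; the file path is an assumption based on the usual
# diffusers test layout.
#   pytest tests/pipelines/text_to_video_synthesis/test_video_to_video.py -k "default_case"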
| 61
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def UpperCAmelCase ( A__ , A__ = "cpu" , A__ = None ) -> None:
_snake_case : Union[str, Any] = torch.load(A__ , map_location=A__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(A__ , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
_snake_case : Tuple = v.half()
if save_path is None: # overwrite src_path
_snake_case : Optional[Any] = src_path
torch.save(A__ , A__ )
if __name__ == "__main__":
fire.Fire(convert)
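

# Hedged example (not part of the original script): round-trip a tiny fp32
# state dict through convert() and check the dtype. The file name is a
# hypothetical placeholder.
def _example_fp16_roundtrip():
    torch.save({"w": torch.ones(2, 2)}, "/tmp/tiny_state_dict.bin")
    convert("/tmp/tiny_state_dict.bin")  # overwrites the file with fp16 tensors
    assert torch.load("/tmp/tiny_state_dict.bin")["w"].dtype == torch.float16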
| 519
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system  matrix @ x = vector  via Gaussian elimination
    with partial pivoting.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the coefficient matrix and the right-hand side into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Given data points (1, y_1), (2, y_2), ..., return the polynomial of
    minimal degree that passes through all of them.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms of the optimum polynomials fitted to the
    sequence generated by ``func`` (Project Euler problem 101).
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 519
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF layout.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
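

# Hedged example (not part of the script): rename_state_dict_key maps ParlAI
# parameter names onto the HF Blenderbot layout, e.g. attention projections.
def _example_rename_key():
    assert rename_state_dict_key("embeddings.weight") == "shared.weight"
    assert (
        rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
        == "encoder.layers.0.self_attn.q_proj.weight"
    )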
| 8
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
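

# Hedged sketch (not from the test file): TFGPT2Tokenizer is an in-graph
# tokenizer, so raw strings can be tokenized inside TensorFlow itself, which is
# what the saved-model test above relies on.
def _example_in_graph_tokenization():
    tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    batch = tf_tokenizer(tf.constant(["Hello there"]))
    return batch["input_ids"]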
| 654
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Remove duplicate initializers from an ONNX model to reduce its size, and
    write the optimized copy next to the input file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 or double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
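

# Hedged usage sketch (not part of the original script): the path is a
# hypothetical placeholder; the optimized copy is written next to the input as
# "optimized_<name>" and its path is returned.
def _example_dedup():
    return remove_dup_initializers("/tmp/model.onnx")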
| 317
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
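

# Hedged example (not part of the original file): run the full preprocess
# pipeline on a random channels-last image. The class name follows the
# reconstruction above; the output is a BatchFeature with "pixel_values".
def _example_preprocess():
    image = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
    processor = EfficientFormerImageProcessor()
    batch = processor.preprocess(image, return_tensors="np")
    return batch["pixel_values"].shape  # expected (1, 3, 224, 224)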
| 317
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
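

# Hedged usage sketch (not part of the original script); all paths are
# hypothetical placeholders for a fine-tuned fairseq checkpoint:
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /tmp/wav2vec_small_960h.pt \
#       --dict_path /tmp/dict.ltr.txt \
#       --pytorch_dump_folder_path /tmp/wav2vec2-base-960h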
| 539
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
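

# Hedged note (not part of the test file): these are plain pytest-style test
# functions, so a single check can be run with, e.g.:
#   pytest digital_image_processing/test_digital_image_processing.py -k test_sepia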
| 116
| 0
|
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
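

# Hedged example (not from the original file): the helper reduces via the
# Euclidean algorithm (gcd), so 0.25 -> (1, 4) and "6.25" -> (25, 4).
def _example_decimal_to_fraction():
    assert decimal_to_fraction(0.25) == (1, 4)
    assert decimal_to_fraction("6.25") == (25, 4)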
| 679
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
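

# Hedged sketch (not part of the original script): the collator flattens the
# four answer candidates per example into one padded batch, then reshapes back
# to (batch_size, num_choices, seq_len). The dummy features are illustrative.
def _example_collator_shapes():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
    features = [
        {"input_ids": [[101, 102]] * 4, "attention_mask": [[1, 1]] * 4, "label": 0}
        for _ in range(2)
    ]
    batch = collator(features)
    assert batch["input_ids"].shape == (2, 4, 2)
    assert batch["labels"].tolist() == [0, 0]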
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
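
# The flatten/un-flatten step in `preprocess_function` above is the core of
# multiple-choice preprocessing: each example is expanded into 4 (context, ending)
# pairs, tokenized as one flat batch, then regrouped into chunks of 4. A minimal
# standalone sketch of that trick; "bert-base-uncased" is just an illustrative
# checkpoint, not mandated by the script.
from itertools import chain
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
contexts = [["A man opens a jar"] * 4]            # one example, context repeated per ending
endings = [[f"ending {i}" for i in range(4)]]     # its four candidate endings
first = list(chain(*contexts))                    # flatten: 4 first sentences
second = list(chain(*endings))                    # flatten: 4 second sentences
enc = tokenizer(first, second, truncation=True)
# regroup every 4 tokenized rows back into one multiple-choice example
unflat = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in enc.items()}
print(len(unflat["input_ids"]), len(unflat["input_ids"][0]))  # 1 example, 4 choices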
| 679
| 1
|
def is_pentagonal ( n : int ) -> bool:
    '''A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer.'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution ( limit : int = 5_000 ) -> int:
    '''Project Euler 44: find the smallest difference D = P_j - P_i such that both
    the sum and the difference of the pentagonal pair (P_i, P_j) are pentagonal.'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
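
# Quick verification of the inverse formula used by is_pentagonal above: inverting
# P(n) = n(3n - 1)/2 gives n = (1 + sqrt(1 + 24P))/6, so every generated pentagonal
# number must pass the check and its successor (P + 1) never can, since consecutive
# pentagonal numbers differ by 3n + 1 >= 4.
def pentagonal(n: int) -> int:
    return n * (3 * n - 1) // 2

assert all(is_pentagonal(pentagonal(n)) for n in range(1, 1_000))
assert not any(is_pentagonal(pentagonal(n) + 1) for n in range(1, 1_000))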
| 164
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
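
# The _LazyModule wiring above is the standard transformers pattern: the import
# structure maps submodules to exported names, real imports run only under
# TYPE_CHECKING or on first attribute access, so importing the package stays cheap
# when torch is absent. A small illustration, assuming an installed transformers
# build that ships MGP-STR:
from transformers.models.mgp_str import MgpstrConfig  # light, no torch needed yet

config = MgpstrConfig()
# Touching a torch-backed class is what resolves the deferred modeling module:
# from transformers.models.mgp_str import MgpstrForSceneTextRecognition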
| 164
| 1
|
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : List[str] = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = downstream_dict["projector.weight"]
SCREAMING_SNAKE_CASE = downstream_dict["projector.bias"]
SCREAMING_SNAKE_CASE = downstream_dict["model.post_net.linear.weight"]
SCREAMING_SNAKE_CASE = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = downstream_dict["model.linear.weight"]
SCREAMING_SNAKE_CASE = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ):
SCREAMING_SNAKE_CASE = WavaVecaForXVector.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = downstream_dict["connector.weight"]
SCREAMING_SNAKE_CASE = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE = downstream_dict[
F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
SCREAMING_SNAKE_CASE = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
SCREAMING_SNAKE_CASE = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
SCREAMING_SNAKE_CASE = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
SCREAMING_SNAKE_CASE = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
SCREAMING_SNAKE_CASE = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
SCREAMING_SNAKE_CASE = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location="cpu" )
SCREAMING_SNAKE_CASE = checkpoint["Downstream"]
SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
SCREAMING_SNAKE_CASE = convert_classification(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
elif arch.endswith("ForAudioFrameClassification" ):
SCREAMING_SNAKE_CASE = convert_diarization(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
elif arch.endswith("ForXVector" ):
SCREAMING_SNAKE_CASE = convert_xvector(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
else:
raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(UpperCAmelCase__ )
hf_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
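
# The converters above all follow the same weight-copy pattern: load an external
# state dict and assign its tensors onto freshly initialized HF modules. A minimal
# sketch of that pattern with made-up tensor names and shapes (illustrative only,
# not the actual s3prl checkpoint layout):
import torch
import torch.nn as nn

downstream = {"projector.weight": torch.randn(4, 8), "projector.bias": torch.zeros(4)}
layer = nn.Linear(8, 4)                          # fresh module with matching shapes
layer.weight.data = downstream["projector.weight"]
layer.bias.data = downstream["projector.bias"]   # parameters now carry the loaded weights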
| 647
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( a ):
lowercase__ : Optional[Any] = ["""input_features""", """is_longer"""]
def __init__( self : str , _UpperCamelCase : Optional[int]=64 , _UpperCamelCase : Any=48_000 , _UpperCamelCase : Optional[Any]=480 , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : Any=1_024 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Any=False , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 14_000 , _UpperCamelCase : int = None , _UpperCamelCase : str = "fusion" , _UpperCamelCase : str = "repeatpad" , **_UpperCamelCase : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = top_db
SCREAMING_SNAKE_CASE = truncation
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = fft_window_size
SCREAMING_SNAKE_CASE = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = max_length_s
SCREAMING_SNAKE_CASE = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = frequency_min
SCREAMING_SNAKE_CASE = frequency_max
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm=_UpperCamelCase , mel_scale="htk" , )
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case( self : str ) -> Dict[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __snake_case( self : Optional[Any] , _UpperCamelCase : np.array , _UpperCamelCase : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = spectrogram(
_UpperCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_UpperCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __snake_case( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
_UpperCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=_UpperCamelCase )
SCREAMING_SNAKE_CASE = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __snake_case( self : Optional[int] , _UpperCamelCase : np.array , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - max_length
SCREAMING_SNAKE_CASE = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE = False
else:
SCREAMING_SNAKE_CASE = self._random_mel_fusion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented" )
else:
SCREAMING_SNAKE_CASE = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.pad(_UpperCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Dict , _UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCamelCase : str = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Tuple , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase , dtype=np.floataa )
elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE = [
self._get_input_mel(_UpperCamelCase , max_length if max_length else self.nb_max_samples , _UpperCamelCase , _UpperCamelCase )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for mel, longer in padded_inputs:
input_mel.append(_UpperCamelCase )
is_longer.append(_UpperCamelCase )
if truncation == "fusion" and sum(_UpperCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE = np.random.randint(0 , len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = True
if isinstance(input_mel[0] , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE = {"input_features": input_mel, "is_longer": is_longer}
SCREAMING_SNAKE_CASE = BatchFeature(_UpperCamelCase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE = input_features.convert_to_tensors(_UpperCamelCase )
return input_features
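
# Hedged usage sketch for the feature extractor above: feed 48 kHz mono audio and
# inspect the fused mel features. In released transformers this class is named
# ClapFeatureExtractor; the random waveform is a stand-in for real audio.
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()
waveform = np.random.randn(48_000 * 12).astype(np.float32)  # 12 s, longer than 10 s
features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape, features["is_longer"])  # fusion stacks 4 mel views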
| 647
| 1
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCamelCase_ = "src/transformers"
UpperCamelCase_ = "docs/source/en/tasks"
def _UpperCAmelCase ( UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: Tuple ):
"""simple docstring"""
with open(UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.readlines()
# Find the start prompt.
__lowerCAmelCase = 0
while not lines[start_index].startswith(UpperCamelCase ):
start_index += 1
start_index += 1
__lowerCAmelCase = start_index
while not lines[end_index].startswith(UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase_ = direct_transformers_import(TRANSFORMERS_PATH)
UpperCamelCase_ = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
UpperCamelCase_ = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def _UpperCAmelCase ( UpperCamelCase: Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
__lowerCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase , set() )
__lowerCAmelCase = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _UpperCAmelCase ( UpperCamelCase: Union[str, Any] , UpperCamelCase: str=False ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = _find_text_in_file(
filename=os.path.join(UpperCamelCase , UpperCamelCase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
__lowerCAmelCase = get_model_list_for_task(UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCamelCase_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
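
# Standalone sketch of the start/end-prompt extraction that _find_text_in_file
# performs above, run on an in-memory list of lines instead of a file:
lines = [
    "intro\n",
    "<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->\n",
    "[Model A](../model_doc/a), [Model B](../model_doc/b)\n",
    "<!--End of the generated tip-->\n",
    "outro\n",
]
start = next(i for i, l in enumerate(lines) if l.startswith("<!--This tip")) + 1
end = next(i for i, l in enumerate(lines) if l.startswith("<!--End"))
print("".join(lines[start:end]))  # the auto-generated model list between the prompts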
| 611
|
def _UpperCAmelCase ( UpperCamelCase: int ):
    """Count the set bits in a non-negative integer."""
    if not isinstance(UpperCamelCase , int ):
        raise TypeError("Input value must be an 'int' type" )
    if UpperCamelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(UpperCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
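
# Quick check of the popcount above against bin().count directly; the
# bin-based comparison keeps it portable to Pythons older than 3.10, which
# introduced int.bit_count.
for value in (0, 1, 25, 37, 255):
    assert _UpperCAmelCase(value) == bin(value).count("1")
print(_UpperCAmelCase(25))  # 0b11001 -> 3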
| 611
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig ( PretrainedConfig ):
    model_type = """lxmert"""
    attribute_map = {}
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''LXMERT configuration; defaults follow the values above.'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs )
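
# Hedged usage sketch for the configuration class above: build a smaller-than-default
# LXMERT configuration, using only parameters defined in the __init__ signature.
config = LxmertConfig(hidden_size=256, l_layers=3, x_layers=2, r_layers=2)
print(config.hidden_size, config.num_qa_labels)  # 256, 9500 (default retained)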
| 568
| 0
|
'''simple docstring'''
def _lowerCAmelCase ( equation_a : list[float] , equation_b : list[float] ):
    if not len(equation_a ) == len(equation_b ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equation_a[0] == equation_a[1] == equation_b[0] == equation_b[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )
    # Extract the coefficients
    aa, ba, ca = equation_a
    ab, bb, cb = equation_b
    # Calculate the determinants of the matrices
    determinant = aa * bb - ab * ba
    determinant_x = ca * bb - cb * ba
    determinant_y = aa * cb - ab * ca
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        else:
            raise ValueError('''No solution. (Inconsistent system)''' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: x = y = 0 (still a consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
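
# Worked example for the solver above: the lines x + 2y = 3 and 2x + y = 3
# intersect at (1, 1); the determinant is 1*1 - 2*2 = -3 and both numerator
# determinants also equal -3.
if __name__ == "__main__":
    print(_lowerCAmelCase([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)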
| 502
|
import random
class Onepad :
    @staticmethod
    def encrypt ( text : str ) -> tuple[list[int], list[int]]:
        """Encode each character as c = (ord(char) + k) * k with a fresh random key k."""
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 3_0_0 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt ( cipher : list[int] , key : list[int] ) -> str:
        """Invert the pad: p = (c - k**2) / k recovers the original code point."""
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt("""Hello""")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 203
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__SCREAMING_SNAKE_CASE : Any =[
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__SCREAMING_SNAKE_CASE : Optional[int] ='''UperNetConfig'''
class A_ ( nn.Module ):
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Tuple = 0 , snake_case__ : int = False , snake_case__ : int = 1 , ):
super().__init__()
lowercase = nn.Convad(
in_channels=snake_case__ , out_channels=snake_case__ , kernel_size=snake_case__ , padding=snake_case__ , bias=snake_case__ , dilation=snake_case__ , )
lowercase = nn.BatchNormad(snake_case__ )
lowercase = nn.ReLU()
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : int ):
lowercase = self.conv(snake_case__ )
lowercase = self.batch_norm(snake_case__ )
lowercase = self.activation(snake_case__ )
return output
class A_ ( nn.Module ):
def __init__( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ):
super().__init__()
lowercase = [
nn.AdaptiveAvgPoolad(snake_case__ ),
UperNetConvModule(snake_case__ , snake_case__ , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(snake_case__ ) , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ):
lowercase = input
for layer in self.layers:
lowercase = layer(snake_case__ )
return hidden_state
class A_ ( nn.Module ):
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Tuple ):
super().__init__()
lowercase = pool_scales
lowercase = align_corners
lowercase = in_channels
lowercase = channels
lowercase = []
for i, pool_scale in enumerate(snake_case__ ):
lowercase = UperNetPyramidPoolingBlock(pool_scale=snake_case__ , in_channels=snake_case__ , channels=snake_case__ )
self.blocks.append(snake_case__ )
self.add_module(str(snake_case__ ) , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Dict ):
lowercase = []
for ppm in self.blocks:
lowercase = ppm(snake_case__ )
lowercase = nn.functional.interpolate(
snake_case__ , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(snake_case__ )
return ppm_outs
class A_ ( nn.Module ):
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
super().__init__()
lowercase = config
lowercase = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase = in_channels
lowercase = config.hidden_size
lowercase = False
lowercase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowercase = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowercase = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowercase = nn.ModuleList()
lowercase = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase = UperNetConvModule(snake_case__ , self.channels , kernel_size=1 )
lowercase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(snake_case__ )
self.fpn_convs.append(snake_case__ )
lowercase = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : List[Any] ):
if isinstance(snake_case__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Dict ):
lowercase = inputs[-1]
lowercase = [x]
psp_outs.extend(self.psp_modules(snake_case__ ) )
lowercase = torch.cat(snake_case__ , dim=1 )
lowercase = self.bottleneck(snake_case__ )
return output
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : List[str] ):
# build laterals
lowercase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(snake_case__ ) )
# build top-down path
lowercase = len(snake_case__ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase = laterals[i - 1].shape[2:]
lowercase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=snake_case__ , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
lowercase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
lowercase = torch.cat(snake_case__ , dim=1 )
lowercase = self.fpn_bottleneck(snake_case__ )
lowercase = self.classifier(snake_case__ )
return output
class A_ ( nn.Module ):
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Tuple = 2 , snake_case__ : Optional[int] = 3 , snake_case__ : str = 1 ):
super().__init__()
lowercase = config
lowercase = config.auxiliary_in_channels
lowercase = config.auxiliary_channels
lowercase = config.auxiliary_num_convs
lowercase = config.auxiliary_concat_input
lowercase = in_index
lowercase = (kernel_size // 2) * dilation
lowercase = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=snake_case__ , padding=snake_case__ , dilation=snake_case__ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=snake_case__ , padding=snake_case__ , dilation=snake_case__ ) )
if self.num_convs == 0:
lowercase = nn.Identity()
else:
lowercase = nn.Sequential(*snake_case__ )
if self.concat_input:
lowercase = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=snake_case__ , padding=kernel_size // 2 )
lowercase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Optional[Any] ):
if isinstance(snake_case__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : str ):
# just take the relevant feature maps
lowercase = encoder_hidden_states[self.in_index]
lowercase = self.convs(snake_case__ )
if self.concat_input:
lowercase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowercase = self.classifier(snake_case__ )
return output
class A_ ( __a ):
_A :List[str] = UperNetConfig
_A :List[str] = '''pixel_values'''
_A :Any = True
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[int] ):
if isinstance(snake_case__ , snake_case__ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : List[str] , snake_case__ : str=False ):
if isinstance(snake_case__ , snake_case__ ):
lowercase = value
__SCREAMING_SNAKE_CASE : Dict =R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__SCREAMING_SNAKE_CASE : str =R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , __a , )
class A_ ( __a ):
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any] ):
super().__init__(snake_case__ )
lowercase = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase = UperNetHead(snake_case__ , in_channels=self.backbone.channels )
lowercase = UperNetFCNHead(snake_case__ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=snake_case__ , config_class=_CONFIG_FOR_DOC )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] = None , snake_case__ : Tuple = None , snake_case__ : Tuple = None , snake_case__ : Union[str, Any] = None , snake_case__ : Union[str, Any] = None , ):
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase = self.backbone.forward_with_filtered_kwargs(
snake_case__ , output_hidden_states=snake_case__ , output_attentions=snake_case__ )
lowercase = outputs.feature_maps
lowercase = self.decode_head(snake_case__ )
lowercase = nn.functional.interpolate(snake_case__ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=snake_case__ )
lowercase = None
if self.auxiliary_head is not None:
lowercase = self.auxiliary_head(snake_case__ )
lowercase = nn.functional.interpolate(
snake_case__ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=snake_case__ )
lowercase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
lowercase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase = loss_fct(snake_case__ , snake_case__ )
lowercase = loss_fct(snake_case__ , snake_case__ )
lowercase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase = (logits,) + outputs[1:]
else:
lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
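
# Hedged inference sketch for the semantic segmentation model above; in released
# transformers the public class is UperNetForSemanticSegmentation, and
# "openmmlab/upernet-convnext-tiny" is the checkpoint already listed in this file.
import torch
from transformers import UperNetForSemanticSegmentation

model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
pixel_values = torch.randn(1, 3, 512, 512)  # stand-in for a preprocessed image
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(logits.shape)  # (1, num_labels, 512, 512): logits upsampled to input resolution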
| 702
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    destination_vertex : int
    weight : int
class AdjacencyList :
    def __init__( self , size : int ):
        self._graph : list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__( self , vertex : int ) -> Iterator[Edge]:
        return iter(self._graph[vertex])
    @property
    def size( self ):
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int ):
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex , weight))
    def get_shortest_path( self , start_vertex : int , finish_vertex : int ) -> int:
        queue = deque([start_vertex])
        distances : list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
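
# Example run of the 0-1 BFS above: a deque-based shortest path where 0-weight
# edges go to the front of the queue and 1-weight edges to the back, giving
# Dijkstra-like results in O(V + E).
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
g.add_edge(2, 3, 0)
print(g.get_shortest_path(0, 3))  # 1, via 0 -> 2 -> 3 (or 0 -> 1 -> 2 -> 3)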
| 72
| 0
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
snake_case = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
snake_case = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
snake_case = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info( self ):
        """Metric metadata: string predictions and references, MATH homepage."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute( self , predictions , references ):
        """Accuracy after canonicalizing each prediction/reference pair."""
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 103
|
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    def __init__( self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ):
        return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class SegmentTree :
    def __init__( self , collection : Sequence , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection) - 1)
    def update( self , i , val ):
        self._update_tree(self.root , i , val)
    def query_range( self , i , j ):
        return self._query_range(self.root , i , j)
    def _build_tree( self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start , mid)
        right = self._build_tree(mid + 1 , end)
        return SegmentTreeNode(start , end , self.fn(left.val , right.val) , left , right)
    def _update_tree( self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val)
        else:
            self._update_tree(node.right , i , val)
        node.val = self.fn(node.left.val , node.right.val)
    def _query_range( self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid) , self._query_range(node.right , node.mid + 1 , j) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j)
    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 354
| 0
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : str = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__UpperCAmelCase : str = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str ):
"""simple docstring"""
__UpperCAmelCase : Dict = vqa_pipeline(UpperCAmelCase_ , top_k=1 )
self.assertEqual(
UpperCAmelCase_ , [
[{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}],
[{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}],
] , )
@require_torch
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__UpperCAmelCase : Optional[int] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__UpperCAmelCase : Optional[int] = "How many cats are there?"
__UpperCAmelCase : str = vqa_pipeline(image=UpperCAmelCase_ , question="How many cats are there?" , top_k=2 )
self.assertEqual(
UpperCAmelCase_ , [{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}, {"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}] )
__UpperCAmelCase : Optional[Any] = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
UpperCAmelCase_ , [{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}, {"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}] )
@slow
@require_torch
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[str] = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
__UpperCAmelCase : Tuple = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__UpperCAmelCase : Union[str, Any] = "How many cats are there?"
__UpperCAmelCase : List[Any] = vqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
__UpperCAmelCase : Optional[Any] = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
__UpperCAmelCase : Tuple = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
pass
| 713
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]; a real filter overrides this."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Get y-axis bounds for plotting fft results, ignoring the DC bin."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the magnitude response of a filter by feeding it an impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter by feeding it an impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
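
# Usage sketch (not part of the original module): any object with a
# `process(sample: float) -> float` method satisfies the FilterType protocol.
# A one-tap identity filter has a flat 0 dB magnitude response and zero phase
# shift, which makes it a quick sanity check for the two plots above.
if __name__ == "__main__":

    class IdentityFilter:
        def process(self, sample: float) -> float:
            # y[n] = x[n]: the impulse passes through unchanged
            return sample

    show_frequency_response(IdentityFilter(), samplerate=48000)
    show_phase_response(IdentityFilter(), samplerate=48000)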
| 329
| 0
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian (Western) Easter date for a given year."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump a human-readable version of the first batch."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        training_logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        training_logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=training_logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE : Any = pl.Trainer.add_argparse_args(parser)
_SCREAMING_SNAKE_CASE : Any = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
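
# Invocation sketch (not part of the original script): the trainer is driven
# entirely by argparse. Paths and the model name below are placeholders, and
# `--model_name_or_path`/`--do_train` are assumed to come from the generic args
# added by `add_generic_args`/`BaseTransformer`, which live outside this file.
#
#   python finetune.py \
#     --model_name_or_path t5-small \
#     --data_dir ./cnn_dm \
#     --output_dir ./cnn_dm_finetuned \
#     --do_train --do_predict \
#     --max_source_length 1024 --max_target_length 56 \
#     --val_metric rouge2 --eval_beams 4
#
# `--data_dir` is expected to follow the Seq2SeqDataset layout
# (train.source/train.target, val.source/val.target, test.source/test.target).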
| 549
| 0
|
def binary_insertion_sort(collection):
    """Sort a mutable collection in ascending order using binary insertion sort."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion point of `val` in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
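
# Quick property check (illustrative sketch, not in the original file): the
# result of binary_insertion_sort should always agree with Python's built-in
# sorted() on random inputs.
if __name__ == "__main__":
    import random

    for _ in range(100):
        data = [random.randint(-1000, 1000) for _ in range(random.randint(0, 50))]
        assert binary_insertion_sort(data[:]) == sorted(data)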
| 1
|
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv=None,
) -> np.ndarray:
    """Compute the Schur complement S = C - B^T A^{-1} B of the block matrix
    [[A, B], [B^T, C]]. An optional pseudo-inverse of A may be supplied."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            # swapping B and C breaks the row-count requirement
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
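
# Usage sketch (illustrative, not part of the original file): for the block
# matrix M = [[A, B], [B^T, C]], the identity det(M) = det(A) * det(S) holds,
# where S is the Schur complement computed above. The matrices mirror the unit
# tests.
if __name__ == "__main__":
    a = np.array([[1.0, 2.0, 1.0], [2.0, 1.0, 2.0], [3.0, 2.0, 4.0]])
    b = np.array([[0.0, 3.0], [3.0, 0.0], [2.0, 3.0]])
    c = np.array([[2.0, 1.0], [6.0, 3.0]])
    s = schur_complement(a, b, c)
    m = np.block([[a, b], [b.T, c]])
    print(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))  # should match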
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1
| 1
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
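
# Invocation sketch (paths and the script filename are placeholders, not part
# of the original file):
#
#   python convert_luke_checkpoint.py \
#     --checkpoint_path /path/to/pytorch_model.bin \
#     --metadata_path /path/to/metadata.json \
#     --entity_vocab_path /path/to/entity_vocab.tsv \
#     --pytorch_dump_folder_path ./luke-converted \
#     --model_size base
#
# `entity_vocab.tsv` is expected to hold one tab-separated line per entity;
# `load_entity_vocab` keeps only the first field (the title) and its line index.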
| 56
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    alpha2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + alpha2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - alpha2)
    a0 = ppmc + alpha2
    a1 = -2 * pmpc
    a2 = ppmc - alpha2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    alpha2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + alpha2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - alpha2)
    a0 = pmc + alpha2
    a1 = 2 * mpc
    a2 = pmc - alpha2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
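
# Usage sketch (not part of the original module): build a 1 kHz low-pass biquad
# at a 48 kHz sample rate and run an impulse through it. This assumes
# `IIRFilter.process(sample)` returns the filtered sample, as the
# response-plotting helpers elsewhere in this collection also assume; the
# printed numbers are illustrative only.
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    impulse = [1.0] + [0.0] * 7
    print([round(lowpass.process(x), 6) for x in impulse])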
| 120
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
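
# Usage sketch (outside the test suite, and hedged): in normal use the pipeline
# is loaded from a pretrained checkpoint rather than dummy components. The
# checkpoint id below is an assumption (DeepFloyd IF weights are gated on the
# Hub), and the tensors are dummies shaped like the ones the tests use.
if __name__ == "__main__":
    pipe = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")  # assumed checkpoint id
    image = floats_tensor((1, 3, 64, 64), rng=random.Random(0))
    mask = floats_tensor((1, 3, 64, 64), rng=random.Random(0))
    result = pipe(prompt="a photo of a cat", image=image, mask_image=mask, num_inference_steps=2)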
| 353
|
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND gate: outputs 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
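
# NAND is functionally complete: the other basic gates can be built from it.
# A short illustration (not in the original file):
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))


if __name__ == "__main__":
    assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
    assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1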
| 353
| 1
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
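
# Invocation sketch (file names and paths are placeholders, not part of the
# original script); the flag combinations follow the help strings above:
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./sd-v1-checkpoint.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --scheduler_type pndm \
#     --image_size 512 \
#     --prediction_type epsilon \
#     --extract_ema \
#     --dump_path ./sd-diffusers
#
# For Stable Diffusion 2.x checkpoints, `--image_size 768
# --prediction_type v_prediction --upcast_attention` matches the guidance given
# in the argument help strings.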
| 31
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
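
# Sketch of the special-token layout produced above (illustrative, not part of
# the original module): a single sequence encodes as `[CLS] A [SEP]` with
# all-zero token_type_ids, and a pair as `[CLS] A [SEP] B [SEP]` with ones over
# the second segment. Running this downloads the checkpoint from the Hub.
if __name__ == "__main__":
    tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
    enc = tok("Hello", "world")
    print(enc["input_ids"], enc["token_type_ids"])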
| 31
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 712
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """Container for the scheduler's mutable state."""
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create( cls , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    """Output of `step`, carrying the updated scheduler state."""
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin , ConfigMixin):
    """Flax port of the DDPM scheduler (https://arxiv.org/abs/2006.11239)."""
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self ):
        return True
    @register_to_config
    def __init__( self , num_train_timesteps : int = 1_000 , beta_start : float = 0.0_001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ):
        self.dtype = dtype
    def create_state( self , common : Optional[CommonSchedulerState] = None ):
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self , state : DDPMSchedulerState , sample : jnp.ndarray , timestep : Optional[int] = None ):
        return sample
    def set_timesteps( self , state : DDPMSchedulerState , num_inference_steps : int , shape : Tuple = () ):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance( self , state : DDPMSchedulerState , t : int , predicted_variance : Optional[jnp.ndarray] = None , variance_type : Optional[str] = None ):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance β̃_t (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1E-2_0 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1E-2_0 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , state : DDPMSchedulerState , model_output : jnp.ndarray , timestep : int , sample : jnp.ndarray , key : Optional[jax.random.KeyArray] = None , return_dict : bool = True , ):
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                " or `v_prediction` for the FlaxDDPMScheduler." )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self , state : DDPMSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self , state : DDPMSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        return get_velocity_common(state.common , sample , noise , timesteps )
    def __len__( self ):
        return self.config.num_train_timesteps
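# A minimal runnable sketch of the intended denoising loop (illustrative only:
# the "model" below is random noise, so the output is meaningless, but it
# exercises create_state / set_timesteps / step end to end):
def _demo_denoise_loop():
    rng = jax.random.PRNGKey(0 )
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000 )
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state , num_inference_steps=5 )
    sample = jax.random.normal(rng , (1, 3, 8, 8) , dtype=scheduler.dtype )
    for t in state.timesteps:
        fake_model_output = jax.random.normal(rng , sample.shape , dtype=scheduler.dtype )
        output = scheduler.step(state , fake_model_output , int(t ) , sample , key=rng )
        sample , state = output.prev_sample , output.state
    return sample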
| 81
| 0
|
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal( decimal: float ) -> str:
    """Convert an integer-valued number into a hexadecimal string such as '0x1a'."""
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    # Guard the zero case so the function never returns a bare "0x" prefix.
    hexadecimal = "0x" + (hexadecimal or "0")
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
    import doctest
    doctest.testmod()
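    # A quick sanity check (assumed to agree with the built-in `hex`):
    for sample in (0, 9, 26, 4_096, -255):
        assert decimal_to_hexadecimal(sample ) == hex(sample ), sample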
| 253
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase__ :
'''simple docstring'''
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ : Optional[Any] = UNetaDConditionModel(
sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ : Dict = DDPMScheduler(
num_train_timesteps=1000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, thresholding=__magic_name__, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', )
torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = UNetaDConditionModel(
sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', class_embed_type='''timestep''', mid_block_scale_factor=1.414, time_embedding_act_fn='''gelu''', time_embedding_dim=32, )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] = DDPMScheduler(
num_train_timesteps=1000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, thresholding=__magic_name__, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', )
torch.manual_seed(0 )
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, )
torch.manual_seed(0 )
UpperCamelCase__ : str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : str = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase__ : Any = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : Tuple = inputs['''prompt''']
UpperCamelCase__ : Optional[Any] = inputs['''generator''']
UpperCamelCase__ : Union[str, Any] = inputs['''num_inference_steps''']
UpperCamelCase__ : Dict = inputs['''output_type''']
if "image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['''image''']
else:
UpperCamelCase__ : Any = None
if "mask_image" in inputs:
UpperCamelCase__ : List[str] = inputs['''mask_image''']
else:
UpperCamelCase__ : Union[str, Any] = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['''original_image''']
else:
UpperCamelCase__ : Tuple = None
UpperCamelCase__ ,UpperCamelCase__ : List[Any] = pipe.encode_prompt(__magic_name__ )
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ : int = image
if mask_image is not None:
UpperCamelCase__ : List[Any] = mask_image
if original_image is not None:
UpperCamelCase__ : Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__magic_name__, __magic_name__, __magic_name__ )
UpperCamelCase__ : Union[str, Any] = pipe(**__magic_name__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__magic_name__ )
UpperCamelCase__ : Any = self.pipeline_class.from_pretrained(__magic_name__ )
pipe_loaded.to(__magic_name__ )
pipe_loaded.set_progress_bar_config(disable=__magic_name__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__magic_name__, __magic_name__ ) is None, f"`{optional_component}` did not stay set to None after loading.", )
UpperCamelCase__ : List[str] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : int = inputs['''generator''']
UpperCamelCase__ : Union[str, Any] = inputs['''num_inference_steps''']
UpperCamelCase__ : Union[str, Any] = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Optional[Any] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ : List[Any] = image
if mask_image is not None:
UpperCamelCase__ : List[str] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : str = pipe_loaded(**__magic_name__ )[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(__magic_name__ ) - to_np(__magic_name__ ) ).max()
self.assertLess(__magic_name__, 1E-4 )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.get_dummy_components()
UpperCamelCase__ : Optional[Any] = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : Dict = pipe(**__magic_name__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__magic_name__ )
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(__magic_name__ )
pipe_loaded.to(__magic_name__ )
pipe_loaded.set_progress_bar_config(disable=__magic_name__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase__ : str = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : Optional[Any] = pipe_loaded(**__magic_name__ )[0]
UpperCamelCase__ : str = np.abs(to_np(__magic_name__ ) - to_np(__magic_name__ ) ).max()
self.assertLess(__magic_name__, 1E-4 )
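# A small illustrative helper (hypothetical name) distilling the invariant both
# tests above assert: a save_pretrained/from_pretrained round trip must not
# change the pipeline's output beyond numerical noise.
def _assert_save_load_close(output_before, output_after, atol=1E-4):
    max_diff = np.abs(to_np(output_before) - to_np(output_after)).max()
    assert max_diff < atol, f"save/load round trip drifted by {max_diff}"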
| 253
| 1
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> List[str]:
A__ = inspect.getfile(accelerate.test_utils )
A__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
A__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
A__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def snake_case__ ( self ) -> Tuple:
print(f'''Found {torch.cuda.device_count()} devices.''' )
A__ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase ,env=os.environ.copy() )
@require_multi_gpu
def snake_case__ ( self ) -> Optional[Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
A__ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase ,env=os.environ.copy() )
@require_multi_gpu
def snake_case__ ( self ) -> str:
A__ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase ,env=os.environ.copy() )
@require_multi_gpu
def snake_case__ ( self ) -> Tuple:
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
A__ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(__UpperCAmelCase ,env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
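# A minimal single-process sketch of what `pad_across_processes` is expected to
# do to one tensor along dim 0 (illustrative only; the real API also handles
# nested containers and first gathers the maximum size across all ranks):
def _pad_dim0_sketch(tensor, target_len, pad_first=False):
    pad = torch.zeros((target_len - tensor.shape[0], *tensor.shape[1:]), dtype=tensor.dtype)
    return torch.cat((pad, tensor) if pad_first else (tensor, pad), dim=0)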
| 536
|
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__lowerCamelCase = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
__lowerCamelCase = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__lowerCamelCase = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
__lowerCamelCase = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
__lowerCamelCase = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
__lowerCamelCase = ""
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert ReadMe.from_string(UpperCamelCase__ , UpperCamelCase__ ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with pytest.raises(UpperCamelCase__ , match=re.escape(expected_error.format(path='root' ) ) ):
A__ = ReadMe.from_string(UpperCamelCase__ , UpperCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with pytest.raises(UpperCamelCase__ , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
ReadMe.from_string(UpperCamelCase__ , UpperCamelCase__ , suppress_parsing_errors=UpperCamelCase__ )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = Path(UpperCamelCase__ ) / 'README.md'
with open(UpperCamelCase__ , 'w+' ) as readme_file:
readme_file.write(UpperCamelCase__ )
A__ = ReadMe.from_readme(UpperCamelCase__ , UpperCamelCase__ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = Path(UpperCamelCase__ ) / 'README.md'
with open(UpperCamelCase__ , 'w+' ) as readme_file:
readme_file.write(UpperCamelCase__ )
A__ = expected_error.format(path=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ , match=re.escape(UpperCamelCase__ ) ):
A__ = ReadMe.from_readme(UpperCamelCase__ , UpperCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = Path(UpperCamelCase__ ) / 'README.md'
with open(UpperCamelCase__ , 'w+' ) as readme_file:
readme_file.write(UpperCamelCase__ )
A__ = expected_error.format(path=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ , match=re.escape(UpperCamelCase__ ) ):
ReadMe.from_readme(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = Path(UpperCamelCase__ ) / 'README.md'
with open(UpperCamelCase__ , 'w+' ) as readme_file:
readme_file.write(UpperCamelCase__ )
ReadMe.from_readme(UpperCamelCase__ , UpperCamelCase__ , suppress_parsing_errors=UpperCamelCase__ )
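# A standalone sketch of the same API outside pytest, assuming the constants
# defined above are bound to the names used in the parametrize lists (e.g.
# `README_CORRECT`) and that the schema from the `yaml.safe_load` call is bound
# to `example_yaml_structure`:
#
#     readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#     readme.validate()  # raises ValueError listing every problem it finds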
| 536
| 1
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =FunnelTokenizer
lowerCamelCase__ =FunnelTokenizerFast
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ) -> FunnelTokenizer:
        """simple docstring"""
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ) -> FunnelTokenizerFast:
        """simple docstring"""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Any:
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ) -> str:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_token_type_ids( self ) -> str:
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running" )
            sentence_len = len(inputs["input_ids"] ) - 1
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
            inputs = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
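# Funnel's segment convention, as exercised above: the <cls> token gets its own
# token type id (2), the first sequence gets 0 and an optional second sequence
# gets 1. A minimal sketch (hypothetical helper; lengths include the <sep>
# tokens, matching the assertions above):
def funnel_token_type_ids(first_len, second_len=0):
    token_type_ids = [2] + [0] * first_len
    if second_len:
        token_type_ids += [1] * second_len
    return token_type_ids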
| 25
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Union[str, Any] = None
if self.use_input_lengths:
snake_case_ : List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case_ : Any = None
snake_case_ : Union[str, Any] = None
snake_case_ : Dict = None
if self.use_labels:
snake_case_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self ) -> List[str]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
snake_case_ : List[Any] = FlaubertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Tuple:
snake_case_ : List[Any] = FlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
snake_case_ : Optional[Any] = FlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE )
snake_case_ : int = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[Any]:
snake_case_ : int = FlaubertForQuestionAnswering(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE )
snake_case_ : Any = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ : Dict = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , )
((snake_case_) , ) : List[str] = result_with_labels.to_tuple()
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
((snake_case_) , ) : Tuple = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Tuple:
snake_case_ : Tuple = FlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
snake_case_ : int = self.num_labels
snake_case_ : List[str] = FlaubertForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
snake_case_ : str = self.num_choices
snake_case_ : Any = FlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : int = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : Dict = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : str = config_and_inputs
snake_case_ : List[str] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A : Tuple = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
snake_case_ : Tuple = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
snake_case_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Optional[Any] = FlaubertModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase ( self ) -> List[str]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[int] = FlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
snake_case_ : Optional[int] = True
snake_case_ : str = model_class(config=_SCREAMING_SNAKE_CASE )
snake_case_ : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : int = torch.jit.trace(
_SCREAMING_SNAKE_CASE , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , "traced_model.pt" ) )
snake_case_ : Tuple = torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , "traced_model.pt" ) , map_location=_SCREAMING_SNAKE_CASE )
loaded(inputs_dict["input_ids"].to(_SCREAMING_SNAKE_CASE ) , inputs_dict["attention_mask"].to(_SCREAMING_SNAKE_CASE ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Optional[Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
snake_case_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )[0]
snake_case_ : List[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 568
| 0
|
from math import pow
def backtrack( needed_sum , power , current_number , current_sum , solutions_count , ):
    '''simple docstring'''
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solve( needed_sum , power ):
    '''simple docstring'''
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10." )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
    import doctest
    doctest.testmod()
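    # For example, 13 == 2**2 + 3**2 is the only way to write 13 as a sum of
    # squares of unique natural numbers:
    print(solve(13 , 2 ) )  # -> 1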
| 139
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''A binary-tree node holding `data` coins.'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins( root: TreeNode | None ):
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
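    # Three coins all at the root of a three-node tree take two moves to
    # distribute (one coin pushed down each edge):
    print(distribute_coins(TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) ) ) )  # -> 2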
| 139
| 1
|
'''simple docstring'''
import numpy as np
import qiskit
def bbaa( key_len: int = 8 , seed: int | None = None ) -> str:
    '''Simulate one round of BB84 and return a `key_len`-bit key string.'''
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , """0""" )
    return key
if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")
    from doctest import testmod
    testmod()
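# A numpy-free sketch of the sifting step above: keep only the bits that were
# measured in positions where Alice's and Bob's bases agree.
def sift_key(alice_basis, bob_basis, measured_bits):
    return "".join(
        bit for a, b, bit in zip(alice_basis, bob_basis, measured_bits) if a == b
    )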
| 667
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
A__ : List[str] = logging.getLogger()
def a_ ( _UpperCAmelCase : int ) -> Optional[int]:
__snake_case : List[str] = {}
__snake_case : List[str] = os.path.join(_UpperCAmelCase ,'all_results.json' )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase ,'r' ) as f:
__snake_case : Dict = json.load(_UpperCAmelCase )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
A__ : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys , 'argv' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 500 )
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
import xla_spawn
        testargs = '\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    '.split()
        with patch.object(sys , 'argv' , testargs ):
            xla_spawn.main()
| 286
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase ( _snake_case : List[str] ):
'''simple docstring'''
lowercase__ = 384
lowercase__ = 7
if "tiny" in model_name:
lowercase__ = 96
lowercase__ = (2, 2, 6, 2)
lowercase__ = (3, 6, 12, 24)
elif "small" in model_name:
lowercase__ = 96
lowercase__ = (2, 2, 18, 2)
lowercase__ = (3, 6, 12, 24)
elif "base" in model_name:
lowercase__ = 128
lowercase__ = (2, 2, 18, 2)
lowercase__ = (4, 8, 16, 32)
lowercase__ = 12
lowercase__ = 512
elif "large" in model_name:
lowercase__ = 192
lowercase__ = (2, 2, 18, 2)
lowercase__ = (6, 12, 24, 48)
lowercase__ = 12
lowercase__ = 768
# set label information
lowercase__ = 150
lowercase__ = "huggingface/label-files"
lowercase__ = "ade20k-id2label.json"
lowercase__ = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="dataset" ) ,"r" ) )
lowercase__ = {int(_snake_case ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["stage1", "stage2", "stage3", "stage4"] ,)
lowercase__ = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowerCamelCase ( _snake_case : List[Any] ):
'''simple docstring'''
lowercase__ = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def lowerCamelCase ( _snake_case : Union[str, Any] ,_snake_case : str ,_snake_case : List[str] ):
'''simple docstring'''
lowercase__ = dct.pop(_snake_case )
lowercase__ = val
def lowerCamelCase ( _snake_case : Any ,_snake_case : List[Any] ):
'''simple docstring'''
lowercase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of the input projection layer (in the original implementation, this is a single matrix + bias)
            lowercase__ = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            lowercase__ = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            lowercase__ = in_proj_weight[:dim, :]
            lowercase__ = in_proj_bias[:dim]
            lowercase__ = in_proj_weight[dim : dim * 2, :]
            lowercase__ = in_proj_bias[dim : dim * 2]
            lowercase__ = in_proj_weight[-dim:, :]
            lowercase__ = in_proj_bias[-dim:]
            # fmt: on
def lowerCamelCase ( _snake_case : Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ = x.shape
lowercase__ = x.reshape(_snake_case ,4 ,in_channel // 4 )
lowercase__ = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowerCamelCase ( _snake_case : List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = x.shape
lowercase__ = x.reshape(_snake_case ,in_channel // 4 ,4 )
lowercase__ = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowerCamelCase ( _snake_case : int ):
'''simple docstring'''
lowercase__ = x.shape[0]
lowercase__ = x.reshape(4 ,in_channel // 4 )
lowercase__ = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowerCamelCase ( _snake_case : Tuple ):
'''simple docstring'''
lowercase__ = x.shape[0]
lowercase__ = x.reshape(in_channel // 4 ,4 )
lowercase__ = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowerCamelCase ( _snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,_snake_case : Optional[int] ):
'''simple docstring'''
lowercase__ = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
lowercase__ = model_name_to_url[model_name]
    lowercase__ = torch.hub.load_state_dict_from_url(_snake_case ,map_location="cpu" ,file_name=_snake_case )["state_dict"]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
lowercase__ = get_upernet_config(_snake_case )
lowercase__ = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(_snake_case )
if "bn" in key:
lowercase__ = key.replace("bn" ,"batch_norm" )
lowercase__ = val
# rename keys
lowercase__ = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowercase__ = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
lowercase__ = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
lowercase__ = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
lowercase__ = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("RGB" )
lowercase__ = SegformerImageProcessor()
lowercase__ = processor(_snake_case ,return_tensors="pt" ).pixel_values
with torch.no_grad():
lowercase__ = model(_snake_case )
lowercase__ = outputs.logits
print(logits.shape )
print("First values of logits:" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowercase__ = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
lowercase__ = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
lowercase__ = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
lowercase__ = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("Logits:" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 539
|
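
The read_in_q_k_v helper above splits the checkpoint's fused qkv projection into separate query/key/value tensors; the slicing convention it uses can be verified in isolation. A minimal sketch with random tensors (the dimension and names are illustrative):

import torch

dim = 96  # illustrative embedding dimension
in_proj_weight = torch.randn(3 * dim, dim)  # fused qkv, rows ordered q, k, v
in_proj_bias = torch.randn(3 * dim)

query_w, key_w, value_w = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
query_b, key_b, value_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[-dim:]

# The three slices reassemble into the original fused matrix and bias.
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)
assert torch.equal(torch.cat([query_b, key_b, value_b]), in_proj_bias)
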
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
SCREAMING_SNAKE_CASE__ = "1"
SCREAMING_SNAKE_CASE__ = "0"
SCREAMING_SNAKE_CASE__ = "1"
SCREAMING_SNAKE_CASE__ = ort.SessionOptions()
SCREAMING_SNAKE_CASE__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
SCREAMING_SNAKE_CASE__ = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
SCREAMING_SNAKE_CASE__ = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
SCREAMING_SNAKE_CASE__ = ort.RunOptions()
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = np.ones((batch, sequence), dtype=np.intaa)
SCREAMING_SNAKE_CASE__ = np.ones((batch, sequence), dtype=np.intaa)
SCREAMING_SNAKE_CASE__ = np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
SCREAMING_SNAKE_CASE__ = time.time()
SCREAMING_SNAKE_CASE__ = 2000
SCREAMING_SNAKE_CASE__ = {}
for iter in range(max_iters):
SCREAMING_SNAKE_CASE__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 539
| 1
|
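
The benchmarking pattern above — a warm-up run, then a timed loop divided by the iteration count — applies to any inference session. A minimal sketch of the same averaging logic, assuming `run_once` wraps a single inference call (the function and its default arguments are illustrative):

import time

def average_latency_ms(run_once, iters=100, warmup=10):
    # Warm-up runs let the runtime finish lazy initialization and kernel tuning.
    for _ in range(warmup):
        run_once()
    start = time.perf_counter()
    for _ in range(iters):
        run_once()
    return (time.perf_counter() - start) * 1000 / iters

print(average_latency_ms(lambda: sum(range(10_000))))
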
"""simple docstring"""
import os
from pathlib import Path
def _lowercase ( ) -> List[str]:
from torch.utils.cpp_extension import load
__lowerCAmelCase : List[Any] = Path(lowerCamelCase__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
__lowerCAmelCase : Any = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" ,"ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" ,"ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" ,lowerCamelCase__ ,with_cuda=lowerCamelCase__ ,extra_include_paths=[str(lowerCamelCase__ )] ,extra_cflags=["-DWITH_CUDA=1"] ,extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] ,)
import MultiScaleDeformableAttention as MSDA
return MSDA
| 293
|
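
torch.utils.cpp_extension.load, used above, JIT-compiles the kernel sources the first time they are requested; its sibling load_inline does the same for sources passed as strings. A minimal CPU-only sketch, assuming a working C++ toolchain is available at runtime (the extension name and function are illustrative):

import torch
from torch.utils.cpp_extension import load_inline

cpp_source = """
#include <torch/extension.h>
torch::Tensor double_tensor(torch::Tensor x) { return x * 2; }
"""

# Compiles into a temporary extension module, binds the listed functions, and imports it.
module = load_inline(name="double_ext", cpp_sources=cpp_source, functions=["double_tensor"])
print(module.double_tensor(torch.ones(3)))
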
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> Any:
_SCREAMING_SNAKE_CASE : Any = SwinvaConfig()
_SCREAMING_SNAKE_CASE : List[str] = swinva_name.split("_" )
_SCREAMING_SNAKE_CASE : Dict = name_split[1]
if "to" in name_split[3]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(name_split[3][-3:] )
else:
_SCREAMING_SNAKE_CASE : Tuple = int(name_split[3] )
if "to" in name_split[2]:
_SCREAMING_SNAKE_CASE : str = int(name_split[2][-2:] )
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(name_split[2][6:] )
if model_size == "tiny":
_SCREAMING_SNAKE_CASE : List[Any] = 9_6
_SCREAMING_SNAKE_CASE : List[Any] = (2, 2, 6, 2)
_SCREAMING_SNAKE_CASE : List[str] = (3, 6, 1_2, 2_4)
elif model_size == "small":
_SCREAMING_SNAKE_CASE : Optional[int] = 9_6
_SCREAMING_SNAKE_CASE : Optional[Any] = (2, 2, 1_8, 2)
_SCREAMING_SNAKE_CASE : List[str] = (3, 6, 1_2, 2_4)
elif model_size == "base":
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1_2_8
_SCREAMING_SNAKE_CASE : List[str] = (2, 2, 1_8, 2)
_SCREAMING_SNAKE_CASE : List[str] = (4, 8, 1_6, 3_2)
else:
_SCREAMING_SNAKE_CASE : Dict = 1_9_2
_SCREAMING_SNAKE_CASE : Optional[Any] = (2, 2, 1_8, 2)
_SCREAMING_SNAKE_CASE : List[str] = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
_SCREAMING_SNAKE_CASE : Optional[int] = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_SCREAMING_SNAKE_CASE : Tuple = 2_1_8_4_1
_SCREAMING_SNAKE_CASE : Optional[int] = "huggingface/label-files"
_SCREAMING_SNAKE_CASE : int = "imagenet-22k-id2label.json"
_SCREAMING_SNAKE_CASE : str = json.load(open(hf_hub_download(lowerCamelCase__, lowerCamelCase__, repo_type="dataset" ), "r" ) )
_SCREAMING_SNAKE_CASE : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE : int = idalabel
_SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in idalabel.items()}
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_0_0_0
_SCREAMING_SNAKE_CASE : Dict = "huggingface/label-files"
_SCREAMING_SNAKE_CASE : Any = "imagenet-1k-id2label.json"
_SCREAMING_SNAKE_CASE : Tuple = json.load(open(hf_hub_download(lowerCamelCase__, lowerCamelCase__, repo_type="dataset" ), "r" ) )
_SCREAMING_SNAKE_CASE : Dict = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE : int = idalabel
_SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE : List[str] = img_size
_SCREAMING_SNAKE_CASE : List[Any] = num_classes
_SCREAMING_SNAKE_CASE : List[Any] = embed_dim
_SCREAMING_SNAKE_CASE : List[Any] = depths
_SCREAMING_SNAKE_CASE : List[Any] = num_heads
_SCREAMING_SNAKE_CASE : int = window_size
return config
def _lowerCAmelCase ( lowerCamelCase__ : Dict ) -> List[str]:
if "patch_embed.proj" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("patch_embed.norm", "embeddings.norm" )
if "layers" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = "encoder." + name
if "attn.proj" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("attn", "attention.self" )
if "norm1" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("mlp.fc2", "output.dense" )
if "q_bias" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("q_bias", "query.bias" )
if "k_bias" in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace("k_bias", "key.bias" )
if "v_bias" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("v_bias", "value.bias" )
if "cpb_mlp" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("cpb_mlp", "continuous_position_bias_mlp" )
if name == "norm.weight":
_SCREAMING_SNAKE_CASE : str = "layernorm.weight"
if name == "norm.bias":
_SCREAMING_SNAKE_CASE : Tuple = "layernorm.bias"
if "head" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("head", "classifier" )
else:
_SCREAMING_SNAKE_CASE : Tuple = "swinv2." + name
return name
def _lowerCAmelCase ( lowerCamelCase__ : Any, lowerCamelCase__ : int ) -> Tuple:
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE : List[str] = orig_state_dict.pop(lowerCamelCase__ )
if "mask" in key:
continue
elif "qkv" in key:
_SCREAMING_SNAKE_CASE : Any = key.split("." )
_SCREAMING_SNAKE_CASE : Optional[Any] = int(key_split[1] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_split[3] )
_SCREAMING_SNAKE_CASE : Tuple = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_SCREAMING_SNAKE_CASE : Optional[int] = val[:dim, :]
_SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE : List[str] = val[-dim:, :]
            else:
                _SCREAMING_SNAKE_CASE : List[Any] = val[:dim]
                _SCREAMING_SNAKE_CASE : str = val[dim : dim * 2]
                _SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = val
return orig_state_dict
def _lowerCAmelCase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : str ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = timm.create_model(lowerCamelCase__, pretrained=lowerCamelCase__ )
timm_model.eval()
_SCREAMING_SNAKE_CASE : Tuple = get_swinva_config(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : int = SwinvaForImageClassification(lowerCamelCase__ )
model.eval()
_SCREAMING_SNAKE_CASE : int = convert_state_dict(timm_model.state_dict(), lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-" ) ) )
_SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw )
_SCREAMING_SNAKE_CASE : Dict = image_processor(images=lowerCamelCase__, return_tensors="pt" )
_SCREAMING_SNAKE_CASE : Tuple = timm_model(inputs["pixel_values"] )
_SCREAMING_SNAKE_CASE : Dict = model(**lowerCamelCase__ ).logits
assert torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1E-3 )
print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase__ )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase__, lowerCamelCase__ ), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase_ : Optional[int] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 572
| 0
|
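
The rename helper in the record above maps checkpoint keys to new names via a chain of ordered substring replacements. The same idea in isolation, with an illustrative mapping (order matters when patterns overlap, which is why specific prefixes are handled first):

# Ordered (old, new) substring pairs; these names are illustrative.
RENAMES = [
    ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
    ("attn.proj", "attention.output.dense"),
    ("mlp.fc1", "intermediate.dense"),
]

def rename(key: str) -> str:
    for old, new in RENAMES:
        key = key.replace(old, new)
    return key

print(rename("layers.0.blocks.0.attn.proj.weight"))
# -> layers.0.blocks.0.attention.output.dense.weight
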
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowerCAmelCase_ = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCamelCase = 14 ) -> None:
'''simple docstring'''
if group not in primes:
raise ValueError("Unsupported Group" )
UpperCamelCase : int = primes[group]["prime"]
UpperCamelCase : List[str] = primes[group]["generator"]
UpperCamelCase : Tuple = int(hexlify(urandom(32 ) ) , base=16 )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return hex(self.__private_key )[2:]
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : Tuple = pow(self.generator , self.__private_key , self.prime )
return hex(lowerCamelCase )[2:]
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase ) -> bool:
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase ) -> str:
'''simple docstring'''
UpperCamelCase : List[str] = int(lowerCamelCase , base=16 )
if not self.is_valid_public_key(lowerCamelCase ):
raise ValueError("Invalid public key" )
UpperCamelCase : str = pow(lowerCamelCase , self.__private_key , self.prime )
return shaaaa(str(lowerCamelCase ).encode() ).hexdigest()
@staticmethod
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase , lowerCamelCase ) -> bool:
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase , (prime - 1) // 2 , lowerCamelCase ) == 1
)
@staticmethod
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 14 ) -> str:
'''simple docstring'''
UpperCamelCase : str = int(lowerCamelCase , base=16 )
UpperCamelCase : int = int(lowerCamelCase , base=16 )
UpperCamelCase : Any = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase , lowerCamelCase ):
raise ValueError("Invalid public key" )
UpperCamelCase : Tuple = pow(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return shaaaa(str(lowerCamelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 435
|
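
A round trip with the Diffie-Hellman class above pairs two instances on the same group: each side publishes its public key and feeds the peer's hex string to the shared-key method, which yields the same SHA-256 digest on both ends. Because the class name is obfuscated here, the sketch below shows the underlying exchange with plain modular arithmetic and deliberately tiny, insecure toy parameters:

from hashlib import sha256

p, g = 23, 5            # toy prime and generator -- never use sizes like this in practice
a_priv, b_priv = 6, 15  # each party's secret exponent

a_pub = pow(g, a_priv, p)  # Alice publishes g^a mod p
b_pub = pow(g, b_priv, p)  # Bob publishes g^b mod p

# Both sides derive g^(ab) mod p and hash it into a symmetric key.
shared_a = sha256(str(pow(b_pub, a_priv, p)).encode()).hexdigest()
shared_b = sha256(str(pow(a_pub, b_priv, p)).encode()).hexdigest()
assert shared_a == shared_b
print(shared_a)
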
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = 50 , lowerCamelCase = "pil" , lowerCamelCase = True , **lowerCamelCase , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
UpperCamelCase : Dict = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCamelCase , )
UpperCamelCase : Optional[int] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase : Dict = self.unet(lowerCamelCase , lowerCamelCase ).sample
            # 2. predict the previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be in [0, 1]
            # do x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
UpperCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase : List[Any] = self.numpy_to_pil(lowerCamelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCamelCase )
| 435
| 1
|
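
The pipeline above follows the standard diffusers sampling loop: draw Gaussian noise, let the scheduler set the timestep grid, and repeatedly step from the UNet's noise prediction back toward the clean image. A hedged usage sketch — the checkpoint id is illustrative; any DDIM-compatible UNet/scheduler pair works:

from diffusers import DDIMPipeline

# Downloads UNet + scheduler weights from the Hub (model id is an assumption here).
pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
images = pipe(batch_size=1, num_inference_steps=50).images  # list of PIL images
images[0].save("sample.png")
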
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase_ : List[Any] = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 365
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = 'https://openaipublic.azureedge.net/jukebox/models/'
UpperCAmelCase_ : Tuple = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _lowercase ( UpperCamelCase__ : str ):
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
__A : str = key.replace('.model.1.bias', '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
__A : Dict = key.replace('.model.1.weight', '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
__A : Optional[int] = key.replace('.model.3.bias', '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
__A : str = key.replace('.model.3.weight', '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
__A : Tuple = key.replace('conditioner_blocks.0', 'conditioner_blocks' )
if "prime_prior" in key:
__A : str = key.replace('prime_prior', 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__A : str = key.replace('.emb.', '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k', '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.', 'metadata_embedding.' )
if "x_emb.emb." in key:
__A : str = key.replace('0.x_emb.emb', 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln', 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln', '.layer_norm' )
if "_ln" in key:
return key.replace('_ln', '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj', 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out', 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out', 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb', 'embed_tokens' )
return key
def _lowercase ( UpperCamelCase__ : Any, UpperCamelCase__ : Dict, UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[Any] ):
__A : Union[str, Any] = {}
import re
__A : List[Any] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__A : Dict = re.compile(
r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__A : Optional[int] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__A : List[str] = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__A : str = re.compile(
r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__A : str = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__A : Optional[int] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
__A : Union[str, Any] = re.compile(
r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__A : List[Any] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(UpperCamelCase__ ):
__A : Any = re_encoder_block_conv_in.match(UpperCamelCase__ )
__A : int = regex_match.groups()
__A : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
__A : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__A : int = re_encoder_block_conv_in.sub(UpperCamelCase__, UpperCamelCase__ )
elif re_encoder_block_resnet.fullmatch(UpperCamelCase__ ):
__A : Any = re_encoder_block_resnet.match(UpperCamelCase__ )
__A : Any = regex_match.groups()
__A : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] )
__A : str = {'1': 1, '3': 2}[groups[-2]]
__A : Optional[int] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__A : str = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__A : List[Any] = prefix + resnet_block
__A : Union[str, Any] = re_encoder_block_resnet.sub(UpperCamelCase__, UpperCamelCase__ )
elif re_encoder_block_proj_out.fullmatch(UpperCamelCase__ ):
__A : List[str] = re_encoder_block_proj_out.match(UpperCamelCase__ )
__A : Optional[int] = regex_match.groups()
__A : Any = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__A : Optional[Any] = re_encoder_block_proj_out.sub(UpperCamelCase__, UpperCamelCase__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(UpperCamelCase__ ):
__A : Dict = re_decoder_block_conv_out.match(UpperCamelCase__ )
__A : int = regex_match.groups()
__A : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
__A : Optional[Any] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__A : Optional[Any] = re_decoder_block_conv_out.sub(UpperCamelCase__, UpperCamelCase__ )
elif re_decoder_block_resnet.fullmatch(UpperCamelCase__ ):
__A : List[str] = re_decoder_block_resnet.match(UpperCamelCase__ )
__A : List[str] = regex_match.groups()
__A : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
__A : Optional[Any] = {'1': 1, '3': 2}[groups[-2]]
__A : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__A : Dict = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__A : Union[str, Any] = prefix + resnet_block
__A : Optional[int] = re_decoder_block_resnet.sub(UpperCamelCase__, UpperCamelCase__ )
elif re_decoder_block_proj_in.fullmatch(UpperCamelCase__ ):
__A : int = re_decoder_block_proj_in.match(UpperCamelCase__ )
__A : List[str] = regex_match.groups()
__A : Tuple = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__A : Optional[Any] = re_decoder_block_proj_in.sub(UpperCamelCase__, UpperCamelCase__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(UpperCamelCase__ ):
__A : Optional[int] = re_prior_cond_conv_out.match(UpperCamelCase__ )
__A : Union[str, Any] = regex_match.groups()
__A : Dict = int(groups[1] ) * 2 + int(groups[2] ) - 2
__A : List[str] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__A : List[str] = re_prior_cond_conv_out.sub(UpperCamelCase__, UpperCamelCase__ )
elif re_prior_cond_resnet.fullmatch(UpperCamelCase__ ):
__A : str = re_prior_cond_resnet.match(UpperCamelCase__ )
__A : Dict = regex_match.groups()
__A : Dict = int(groups[1] ) * 2 + int(groups[2] ) - 2
__A : Dict = {'1': 1, '3': 2}[groups[-2]]
__A : Optional[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__A : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__A : Union[str, Any] = prefix + resnet_block
__A : str = re_prior_cond_resnet.sub(UpperCamelCase__, UpperCamelCase__ )
elif re_prior_cond_proj_in.fullmatch(UpperCamelCase__ ):
__A : Tuple = re_prior_cond_proj_in.match(UpperCamelCase__ )
__A : Optional[Any] = regex_match.groups()
__A : Optional[int] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__A : List[Any] = re_prior_cond_proj_in.sub(UpperCamelCase__, UpperCamelCase__ )
# keep original key
else:
__A : List[Any] = original_key
__A : int = replace_key(UpperCamelCase__ )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            __A : Optional[Any] = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key} -> {key} :\nshape {val.shape} and {value.shape} do not match""" )
__A : Union[str, Any] = original_key
__A : Optional[int] = original_key
__A : Optional[Any] = value
return new_dict
@torch.no_grad()
def _lowercase ( UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
__A : Any = requests.get(f"""{PREFIX}{file}""", allow_redirects=UpperCamelCase__ )
os.makedirs(f"""{pytorch_dump_folder_path}/""", exist_ok=UpperCamelCase__ )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""", 'wb' ).write(r.content )
__A : str = MODEL_MAPPING[model_name.split('/' )[-1]]
__A : Optional[Any] = JukeboxConfig.from_pretrained(UpperCamelCase__ )
__A : Tuple = JukeboxModel(UpperCamelCase__ )
__A : List[Any] = []
__A : int = {}
for i, dict_name in enumerate(UpperCamelCase__ ):
__A : List[Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
__A : Dict = {}
for k in old_dic.keys():
if k.endswith('.b' ):
__A : str = old_dic[k]
elif k.endswith('.w' ):
__A : int = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__A : Optional[Any] = old_dic[k]
else:
__A : str = old_dic[k]
__A : List[Any] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
__A : Any = fix_jukebox_keys(UpperCamelCase__, model.state_dict(), UpperCamelCase__, UpperCamelCase__ )
weight_dict.append(UpperCamelCase__ )
__A : Union[str, Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(UpperCamelCase__ )
for i in range(len(UpperCamelCase__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
with open(f"""{pytorch_dump_folder_path}/mapping.json""", 'w' ) as txtfile:
json.dump(UpperCamelCase__, UpperCamelCase__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 365
| 1
|
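
The key-fixing logic above relies on compiled regexes with fullmatch to capture block indices and rebuild each new key from the captured groups. The core pattern in isolation (the regex and key below are illustrative, simplified from the converter):

import re

pattern = re.compile(r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d)\.(bias|weight)")

key = "encoders.0.level_blocks.1.model.2.3.weight"
match = pattern.fullmatch(key)
if match:
    groups = match.groups()
    # Two nested sub-module indices fold into one block index, as in the converter above.
    block_index = int(groups[2]) * 2 + int(groups[3])
    new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
    print(new_key)  # encoders.0.level_blocks.1.downsample_block.7.weight
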
from __future__ import annotations
lowercase = list[tuple[int, int]]
lowercase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a , a , a , a , a , ) -> List[Any]:
snake_case_ = pos_x
snake_case_ = pos_y
snake_case_ = (pos_y, pos_x)
snake_case_ = goal_x
snake_case_ = goal_y
snake_case_ = g_cost
snake_case_ = parent
snake_case_ = self.calculate_heuristic()
def _UpperCamelCase ( self ) -> float:
snake_case_ = abs(self.pos_x - self.goal_x )
snake_case_ = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , a ) -> bool:
return self.f_cost < other.f_cost
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a ) -> Optional[Any]:
snake_case_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , a )
snake_case_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , a )
snake_case_ = [self.start]
snake_case_ = []
snake_case_ = False
def _UpperCamelCase ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case_ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
snake_case_ = True
return self.retrace_path(a )
self.closed_nodes.append(a )
snake_case_ = self.get_successors(a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(a )
else:
# retrieve the best current path
snake_case_ = self.open_nodes.pop(self.open_nodes.index(a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(a )
else:
self.open_nodes.append(a )
if not self.reached:
return [self.start.pos]
return None
def _UpperCamelCase ( self , a ) -> list[Node]:
snake_case_ = []
for action in delta:
snake_case_ = parent.pos_x + action[1]
snake_case_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
a , a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , a , ) )
return successors
def _UpperCamelCase ( self , a ) -> Path:
snake_case_ = node
snake_case_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case_ = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase = (0, 0)
lowercase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase = GreedyBestFirst(init, goal)
lowercase = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase = 2
for elem in grid:
print(elem)
| 720
|
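
The search above is greedy best-first: the open list is ordered purely by the Manhattan heuristic (the node's f_cost is the heuristic alone), unlike A*, which would rank nodes by path cost plus heuristic. A minimal check of that ordering rule (coordinates are illustrative):

def manhattan(pos, goal):
    # Greedy best-first expands whichever frontier node has the smallest h;
    # A* would rank by g + h instead.
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

goal = (6, 6)
frontier = [(0, 0), (3, 4), (5, 6)]
print(min(frontier, key=lambda p: manhattan(p, goal)))  # (5, 6)
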
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
lowercase = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    lowercase = requests.get(url, headers={"User-Agent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
lowercase = BeautifulSoup(res.text, "html.parser")
lowercase = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 607
| 0
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE__ : List[str] = Lock()
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0, 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__magic_name__ :List[str] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__magic_name__ :Dict = min(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__magic_name__ :Optional[int] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__magic_name__ :List[str] = max(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__SCREAMING_SNAKE_CASE )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
__magic_name__ :int = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__magic_name__ :Optional[Any] = Pipe()
__magic_name__ :List[Any] = Pipe()
process_array_.append(
Process(
target=__SCREAMING_SNAKE_CASE, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
__magic_name__ :Dict = temp_rs
__magic_name__ :List[str] = temp_rr
for i in range(1, len(__SCREAMING_SNAKE_CASE ) - 1 ):
__magic_name__ :List[Any] = Pipe()
__magic_name__ :str = Pipe()
process_array_.append(
Process(
target=__SCREAMING_SNAKE_CASE, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
__magic_name__ :str = temp_rs
__magic_name__ :Optional[Any] = temp_rr
process_array_.append(
Process(
target=__SCREAMING_SNAKE_CASE, args=(
len(__SCREAMING_SNAKE_CASE ) - 1,
arr[len(__SCREAMING_SNAKE_CASE ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__SCREAMING_SNAKE_CASE ) - 1],
), ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0, len(__SCREAMING_SNAKE_CASE ) ):
__magic_name__ :Optional[int] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = list(range(1_0, 0, -1 ) )
print('''Initial List''' )
print(*__SCREAMING_SNAKE_CASE )
__magic_name__ :str = odd_even_transposition(__SCREAMING_SNAKE_CASE )
print('''Sorted List\n''' )
print(*__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 0
|
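
The process-per-element version above mirrors the classic sequential odd-even transposition sort, which alternates compare-swaps on even and odd neighbor pairs and is guaranteed sorted after n passes. A single-process sketch of the same algorithm:

def odd_even_transposition_sort(arr):
    n = len(arr)
    for pass_idx in range(n):
        # Alternate between (even, even+1) and (odd, odd+1) neighbor pairs.
        start = pass_idx % 2
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition_sort(list(range(10, 0, -1))))
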
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
_SCREAMING_SNAKE_CASE : Any = ("""dense.weight""", """attention.self.query""", """attention.self.key""", """attention.self.value""")
_SCREAMING_SNAKE_CASE : int = (
("""layer.""", """layer_"""),
("""word_embeddings.weight""", """word_embeddings"""),
("""position_embeddings.weight""", """position_embeddings"""),
("""token_type_embeddings.weight""", """token_type_embeddings"""),
(""".""", """/"""),
("""LayerNorm/weight""", """LayerNorm/gamma"""),
("""LayerNorm/bias""", """LayerNorm/beta"""),
("""weight""", """kernel"""),
)
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
os.makedirs(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model.state_dict()
def to_tf_var_name(__SCREAMING_SNAKE_CASE ):
for patt, repl in iter(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return F"""bert/{name}"""
def create_tf_var(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = tf.dtypes.as_dtype(tensor.dtype )
_SCREAMING_SNAKE_CASE : Tuple = tf.get_variable(dtype=__SCREAMING_SNAKE_CASE , shape=tensor.shape , name=__SCREAMING_SNAKE_CASE , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__SCREAMING_SNAKE_CASE )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_SCREAMING_SNAKE_CASE : Optional[int] = to_tf_var_name(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_SCREAMING_SNAKE_CASE : List[str] = torch_tensor.T
_SCREAMING_SNAKE_CASE : List[str] = create_tf_var(tensor=__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE , session=__SCREAMING_SNAKE_CASE )
tf.keras.backend.set_value(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[int] = session.run(__SCREAMING_SNAKE_CASE )
print(F"""Successfully created {tf_name}: {np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , model_name.replace("""-""" , """_""" ) + """.ckpt""" ) )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE=None )-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""model name e.g. bert-base-uncased""" )
parser.add_argument(
"""--cache_dir""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Directory containing pytorch model""" )
parser.add_argument("""--pytorch_model_path""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""/path/to/<pytorch-model-name>.bin""" )
parser.add_argument("""--tf_cache_dir""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Directory in which to save tensorflow model""" )
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__SCREAMING_SNAKE_CASE , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 338
| 0
|
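
The converter above transposes dense and attention weights because PyTorch linear layers store weights as (out_features, in_features) while TensorFlow kernels use (in_features, out_features). The rule in isolation with numpy (shapes are illustrative):

import numpy as np

out_features, in_features = 4, 3
torch_weight = np.arange(12).reshape(out_features, in_features)  # PyTorch layout

tf_kernel = torch_weight.T  # TensorFlow kernel layout
assert tf_kernel.shape == (in_features, out_features)
# TF computes y = x @ kernel, matching PyTorch's y = x @ weight.T in nn.Linear.
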
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _snake_case ( _a ):
_A : Any = ['''image_processor''', '''tokenizer''']
_A : str = '''OwlViTImageProcessor'''
_A : List[str] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : List[str]=None ,**SCREAMING_SNAKE_CASE__ : Dict ):
SCREAMING_SNAKE_CASE:Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,SCREAMING_SNAKE_CASE__ ,)
SCREAMING_SNAKE_CASE:List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE:Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
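

# Added usage sketch (not from the original): a typical zero-shot detection
# preprocessing call through the processor above. The checkpoint name and
# image URL are illustrative.
def _example_owlvit_inputs():
    import requests
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    # One image, one set of two text queries; query sets are padded to the
    # longest set in the batch before the encodings are concatenated.
    return processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")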
| 465
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
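

# Added sketch (not from the original): the same answer in O(1) time via the
# closed forms sum(i) = n(n+1)/2 and sum(i**2) = n(n+1)(2n+1)/6.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares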
| 465
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be an int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
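

# Added sketch (not from the original): a round-trip sanity check using the
# classic "^BANANA" worked example (BWT "BNN^AAA", original index 6).
def _bwt_round_trip_demo():
    result = bwt_transform("^BANANA")
    assert result["bwt_string"] == "BNN^AAA"
    assert result["idx_original_string"] == 6
    assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"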
| 46
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 587
| 0
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 8
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True,
                 padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None,
                 return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False,
                 return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False,
                 verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
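

# Added usage sketch (not from the original): with apply_ocr=True (the image
# processor's default), words and boxes come from OCR, so only the image is
# required. The checkpoint name and file path are placeholders.
def _example_layoutxlm_encoding():
    from PIL import Image

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    # Returns input_ids, attention_mask, bbox and the resized image tensor.
    return processor(image, return_tensors="pt")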
| 8
| 1
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        list_of_dicts = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(list_of_dicts)
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_of_dicts = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_of_dicts)
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
self.assertListEqual(dset.column_names , [] )
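

# Added sketch (not from the original): the behaviour the tests above pin
# down, in a few lines.
def _example_from_list():
    ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
    assert ds.column_names == ["col_1", "col_2"]
    assert ds[0] == {"col_1": 3, "col_2": "a"}
    # The first record fixes the columns; missing keys are filled with None.
    assert Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])[1] == {"col_1": None}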
| 519
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles(self):
for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode(self):
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            input_texts = tf.convert_to_tensor(self.test_sentences)
            out = model(input_texts)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(input_texts)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
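

# Added sketch (not from the original): the in-graph tokenizer end to end with
# the tiny test checkpoint defined above; requires tensorflow_text.
def _example_in_graph_tokenization():
    tf_tokenizer = TFBertTokenizer.from_pretrained(TINY_MODEL_CHECKPOINT)
    return tf_tokenizer(tf.constant(["This is a straightforward English test sentence."]))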
| 519
| 1
|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
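

# Added sketch (not from the original): the programmatic equivalent of the
# fire CLI above; the model name and output directory are placeholders.
def _example_save_random_t5():
    return save_randomly_initialized_version("t5-small", "/tmp/t5-small-random")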
| 711
|
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
    def test_prophetnet_tokenizer(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 524
| 0
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 464
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
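

# Added usage sketch (not from the original): how the criteria exercised above
# are typically combined and passed to a model's generate() call through its
# stopping_criteria argument.
def _example_stopping_criteria():
    criteria = StoppingCriteriaList(
        [
            MaxLengthCriteria(max_length=20),
            MaxTimeCriteria(max_time=5.0),
        ]
    )
    input_ids = ids_tensor((1, 5), vocab_size=250)
    scores = torch.ones((1, 5), dtype=torch.float)
    # The list returns True as soon as any single criterion fires.
    return criteria(input_ids, scores)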
| 464
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__magic_name__ = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
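

# Added usage sketch (not from the original): what a concrete subclass such as
# Wav2Vec2FeatureExtractor gains from the padding machinery above. The raw
# values are arbitrary.
def _example_pad():
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    features = {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]}
    # The shorter sequence is padded with padding_value up to the longest one.
    return extractor.pad(features, padding="longest", return_tensors="np")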
| 709
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
                 image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0,
                 initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02,
                 layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute",
                 cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
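

# Added sketch (not from the original): composing the three sub-configs above
# into the top-level config, mirroring the classmethod defined just before.
def _example_instructblip_config():
    vision_config = InstructBlipVisionConfig()
    qformer_config = InstructBlipQFormerConfig()
    text_config = CONFIG_MAPPING["opt"]()
    return InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)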
| 679
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
lowerCamelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
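# Hedged usage sketch (not part of the original script; the file name is an
# assumption): converting the DINO ViT-B/16 checkpoint would look roughly like
#   python convert_dino_to_vit.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16_hf
# Note that base_model defaults to True via parser.set_defaults above.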
| 82
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( lowercase , lowercase , unittest.TestCase ):
UpperCamelCase : int = IFImgaImgSuperResolutionPipeline
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __snake_case ( self ):
return self._get_superresolution_dummy_components()
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : Optional[int] = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : int = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __snake_case ( self ):
self._test_save_load_local()
def __snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 110
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Tuple = """yolos"""
def __init__( self , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=[5_1_2, 8_6_4] , __UpperCAmelCase=1_6 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = hidden_size
lowerCAmelCase__ :Union[str, Any] = num_hidden_layers
lowerCAmelCase__ :Tuple = num_attention_heads
lowerCAmelCase__ :List[str] = intermediate_size
lowerCAmelCase__ :Dict = hidden_act
lowerCAmelCase__ :List[str] = hidden_dropout_prob
lowerCAmelCase__ :List[str] = attention_probs_dropout_prob
lowerCAmelCase__ :List[str] = initializer_range
lowerCAmelCase__ :Any = layer_norm_eps
lowerCAmelCase__ :Dict = image_size
lowerCAmelCase__ :str = patch_size
lowerCAmelCase__ :Any = num_channels
lowerCAmelCase__ :Dict = qkv_bias
lowerCAmelCase__ :Tuple = num_detection_tokens
lowerCAmelCase__ :Optional[int] = use_mid_position_embeddings
lowerCAmelCase__ :Tuple = auxiliary_loss
# Hungarian matcher
lowerCAmelCase__ :Dict = class_cost
lowerCAmelCase__ :Optional[int] = bbox_cost
lowerCAmelCase__ :Dict = giou_cost
# Loss coefficients
lowerCAmelCase__ :Optional[Any] = bbox_loss_coefficient
lowerCAmelCase__ :Union[str, Any] = giou_loss_coefficient
lowerCAmelCase__ :Optional[int] = eos_coefficient
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Optional[Any] = version.parse("""1.11""" )
@property
def snake_case ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case ( self ):
'''simple docstring'''
return 1_2
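# Minimal usage sketch (assumed; the first class above corresponds to
# YolosConfig in transformers):
#   config = YolosConfig()
#   assert config.num_detection_tokens == 100 and config.patch_size == 16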
| 560
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :int = (UnCLIPScheduler,)
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**__UpperCAmelCase )
return config
def snake_case ( self ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__UpperCAmelCase , prev_timestep=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ :Tuple = self.get_scheduler_config(variance_type='fixed_small_log' )
lowerCAmelCase__ :int = scheduler_class(**__UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1E-5
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.scheduler_classes[0]
lowerCAmelCase__ :List[Any] = self.get_scheduler_config(variance_type='learned_range' )
lowerCAmelCase__ :Any = scheduler_class(**__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__UpperCAmelCase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=__UpperCAmelCase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=__UpperCAmelCase ) - -0.0_01_00_11 < 1E-5
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ :Any = self.get_scheduler_config()
lowerCAmelCase__ :Any = scheduler_class(**__UpperCAmelCase )
lowerCAmelCase__ :str = scheduler.timesteps
lowerCAmelCase__ :Dict = self.dummy_model()
lowerCAmelCase__ :Optional[Any] = self.dummy_sample_deter
lowerCAmelCase__ :Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(__UpperCAmelCase ):
# 1. predict noise residual
lowerCAmelCase__ :Any = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ :Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
lowerCAmelCase__ :Dict = pred_prev_sample
lowerCAmelCase__ :Tuple = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ :List[str] = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ :Tuple = self.get_scheduler_config()
lowerCAmelCase__ :str = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(2_5 )
lowerCAmelCase__ :List[Any] = scheduler.timesteps
lowerCAmelCase__ :Union[str, Any] = self.dummy_model()
lowerCAmelCase__ :List[Any] = self.dummy_sample_deter
lowerCAmelCase__ :int = torch.manual_seed(0 )
for i, t in enumerate(__UpperCAmelCase ):
# 1. predict noise residual
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )
if i + 1 == timesteps.shape[0]:
lowerCAmelCase__ :Optional[Any] = None
else:
lowerCAmelCase__ :int = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ :Any = scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prev_timestep=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
lowerCAmelCase__ :Dict = pred_prev_sample
lowerCAmelCase__ :int = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ :Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
pass
| 560
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __snake_case :
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase : Tuple , ) -> int:
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Tuple = 13
lowerCAmelCase_ : Optional[Any] = 7
lowerCAmelCase_ : str = True
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : str = True
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Tuple = 2
lowerCAmelCase_ : Optional[int] = 99
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[str] = 32
lowerCAmelCase_ : List[Any] = 2
lowerCAmelCase_ : Optional[int] = 4
lowerCAmelCase_ : Dict = 0.1
lowerCAmelCase_ : int = 0.1
lowerCAmelCase_ : Any = 5_12
lowerCAmelCase_ : Tuple = 16
lowerCAmelCase_ : List[Any] = 2
lowerCAmelCase_ : Tuple = 0.02
lowerCAmelCase_ : Dict = 3
lowerCAmelCase_ : Optional[Any] = 4
lowerCAmelCase_ : Tuple = """last"""
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Tuple = 0
def __lowercase ( self : List[str] ) -> Optional[int]:
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCAmelCase_ : Union[str, Any] = None
if self.use_input_lengths:
lowerCAmelCase_ : Dict = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : Any = None
if self.use_labels:
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : str = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase ( self : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , ) -> Optional[int]:
lowerCAmelCase_ : List[str] = TFFlaubertModel(config=lowerCamelCase )
lowerCAmelCase_ : str = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowerCAmelCase_ : int = model(lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = [input_ids, input_mask]
lowerCAmelCase_ : Tuple = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : int , ) -> Any:
lowerCAmelCase_ : Optional[Any] = TFFlaubertWithLMHeadModel(lowerCamelCase )
lowerCAmelCase_ : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowerCAmelCase_ : int = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : Tuple , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , ) -> Dict:
lowerCAmelCase_ : Optional[int] = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase )
lowerCAmelCase_ : List[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowerCAmelCase_ : Any = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase )
lowerCAmelCase_ : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowerCAmelCase_ : List[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , ) -> int:
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase )
lowerCAmelCase_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase_ : List[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : str , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , ) -> Optional[Any]:
lowerCAmelCase_ : List[Any] = self.num_choices
lowerCAmelCase_ : str = TFFlaubertForMultipleChoice(config=lowerCamelCase )
lowerCAmelCase_ : Dict = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ : Tuple = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ : List[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCAmelCase_ : Optional[int] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase ( self : int ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
),
) : Union[str, Any] = config_and_inputs
lowerCAmelCase_ : str = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
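# Note (descriptive): besides input_ids, Flaubert models consume `lengths`
# (the true sequence length of each example) and `langs` (per-token language
# ids for the multilingual, XLM-style setup), which is why the tester builds
# those extra inputs above.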
@require_tf
class __snake_case ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def __lowercase ( self : Tuple , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Union[str, Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowercase ( self : List[str] ) -> Optional[Any]:
lowerCAmelCase_ : Any = TFFlaubertModelTester(self )
lowerCAmelCase_ : Dict = ConfigTester(self , config_class=lowerCamelCase , emb_dim=37 )
def __lowercase ( self : str ) -> Tuple:
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase )
def __lowercase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase )
def __lowercase ( self : Tuple ) -> Any:
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase )
def __lowercase ( self : str ) -> List[Any]:
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase )
def __lowercase ( self : Dict ) -> Dict:
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase )
def __lowercase ( self : Optional[int] ) -> int:
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase )
@slow
def __lowercase ( self : str ) -> List[Any]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Any = TFFlaubertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@slow
def __lowercase ( self : int ) -> int:
lowerCAmelCase_ : Dict = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
lowerCAmelCase_ : Any = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowerCAmelCase_ : Union[str, Any] = model(lowerCamelCase )[0]
lowerCAmelCase_ : Any = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase )
# compare the actual values for a slice.
lowerCAmelCase_ : List[str] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 275
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def UpperCamelCase_ ( A__ : np.ndarray ):
'''simple docstring'''
lowerCAmelCase_, lowerCAmelCase_ : List[str] = np.shape(A__ )
if rows != columns:
lowerCAmelCase_ : List[str] = (
"""'table' has to be of square shaped array but got a """
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(A__ )
lowerCAmelCase_ : Optional[int] = np.zeros((rows, columns) )
lowerCAmelCase_ : Tuple = np.zeros((rows, columns) )
for i in range(A__ ):
for j in range(A__ ):
lowerCAmelCase_ : str = sum(lower[i][k] * upper[k][j] for k in range(A__ ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
lowerCAmelCase_ : Union[str, Any] = (table[i][j] - total) / upper[j][j]
lowerCAmelCase_ : Dict = 1
for j in range(A__ , A__ ):
lowerCAmelCase_ : str = sum(lower[i][k] * upper[k][j] for k in range(A__ ) )
lowerCAmelCase_ : List[str] = table[i][j] - total
return lower, upper
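# Hedged usage sketch (UpperCamelCase_ is the LU routine defined above; this
# assumes the intended LU semantics, since the repeated argument names in the
# body are placeholders):
# >>> import numpy as np
# >>> lower, upper = UpperCamelCase_(np.array([[2.0, 1.0], [6.0, 8.0]]))
# >>> np.allclose(lower @ upper, [[2.0, 1.0], [6.0, 8.0]])
# True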
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275
| 1
|
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowercase = get_logger(__name__)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=None) -> Tuple:
"""simple docstring"""
a_ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__"):
setattr(self , lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_))
a_ =module._original_module if isinstance(lowerCAmelCase_ , _PatchedModuleObj) else module
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : List[str] = []
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None) -> Tuple:
"""simple docstring"""
a_ =obj
a_ =target
a_ =new
a_ =target.split(".")[0]
a_ ={}
a_ =attrs or []
def __enter__( self) -> str:
"""simple docstring"""
*a_ , a_ =self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase_)):
try:
a_ =import_module(".".join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a_ =getattr(self.obj , lowerCAmelCase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a_ =obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase_ , _PatchedModuleObj(lowerCAmelCase_ , attrs=self.attrs))
a_ =getattr(self.obj , lowerCAmelCase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , _PatchedModuleObj(getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , attrs=self.attrs))
a_ =getattr(lowerCAmelCase_ , lowerCAmelCase_)
# finally set the target attribute
setattr(lowerCAmelCase_ , lowerCAmelCase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
        # and also to patch "os.path.join": we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a_ =getattr(import_module(".".join(lowerCAmelCase_)) , lowerCAmelCase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase_) is attr_value:
a_ =getattr(self.obj , lowerCAmelCase_)
setattr(self.obj , lowerCAmelCase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a_ =globals()["__builtins__"][target_attr]
setattr(self.obj , lowerCAmelCase_ , self.new)
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
for attr in list(self.original):
setattr(self.obj , lowerCAmelCase_ , self.original.pop(lowerCAmelCase_))
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
self.__enter__()
self._active_patches.append(self)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
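# Hypothetical usage sketch (not in the original file): the second class above
# is a context-manager patcher. Given a module `mod` whose code calls
# os.path.join, one could write
#   with UpperCAmelCase(mod, "os.path.join", lambda *parts: "/".join(parts)):
#       ...  # mod now sees the patched join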
| 41
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
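# Expected behaviour (descriptive, not a verified run): fill_mask returns the
# top-k (filled_sentence, probability, predicted_token) tuples, so this prints
# the three most likely completions of "Le camembert est <mask> :)".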
| 41
| 1
|
from __future__ import annotations
lowerCamelCase__ = 10
def UpperCamelCase ( snake_case__ : list[int] ):
'''simple docstring'''
__snake_case :str = 1
__snake_case :str = max(snake_case__ )
while placement <= max_digit:
# declare and initialize empty buckets
__snake_case :list[list] = [[] for _ in range(snake_case__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
__snake_case :Union[str, Any] = int((i / placement) % RADIX )
buckets[tmp].append(snake_case__ )
# put each buckets' contents into list_of_ints
__snake_case :List[Any] = 0
for b in range(snake_case__ ):
for i in buckets[b]:
__snake_case :List[str] = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
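# Hedged usage sketch (UpperCamelCase is the radix sort above; this assumes the
# intended semantics, since some argument names in the body are placeholders):
# >>> UpperCamelCase([170, 45, 75, 90, 802, 24, 2, 66])
# [2, 24, 45, 66, 75, 90, 170, 802]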
if __name__ == "__main__":
import doctest
doctest.testmod()
| 455
|
from collections.abc import Callable
class snake_case__ :
'''simple docstring'''
def __init__( self , a__ = None ) -> None:
'''simple docstring'''
__snake_case :list = []
# Stores indexes of each item for supporting updates and deletion.
__snake_case :dict = {}
# Stores current size of heap.
__snake_case :List[Any] = 0
        # Stores the function used to score items; ordering in the heap is
        # based on this score.
        __snake_case :Optional[Any] = key or (lambda a__ : a__)
def __lowercase ( self , a__ ) -> int | None:
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowercase ( self , a__ ) -> int | None:
'''simple docstring'''
__snake_case :int = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowercase ( self , a__ ) -> int | None:
'''simple docstring'''
__snake_case :Optional[int] = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowercase ( self , a__ , a__ ) -> None:
'''simple docstring'''
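        # First swap the stored indexes of the two items in pos_map.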
__snake_case , __snake_case :Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__snake_case , __snake_case :Tuple = self.arr[j], self.arr[i]
def __lowercase ( self , a__ , a__ ) -> bool:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowercase ( self , a__ ) -> int:
'''simple docstring'''
__snake_case :int = self._left(a__ )
__snake_case :Optional[int] = self._right(a__ )
__snake_case :List[Any] = i
if left is not None and not self._cmp(a__ , a__ ):
__snake_case :int = left
if right is not None and not self._cmp(a__ , a__ ):
__snake_case :Tuple = right
return valid_parent
def __lowercase ( self , a__ ) -> None:
'''simple docstring'''
__snake_case :int = self._parent(a__ )
while parent is not None and not self._cmp(a__ , a__ ):
self._swap(a__ , a__ )
__snake_case , __snake_case :str = parent, self._parent(a__ )
def __lowercase ( self , a__ ) -> None:
'''simple docstring'''
__snake_case :List[Any] = self._get_valid_parent(a__ )
while valid_parent != index:
self._swap(a__ , a__ )
__snake_case , __snake_case :Optional[int] = valid_parent, self._get_valid_parent(a__ )
def __lowercase ( self , a__ , a__ ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
__snake_case :List[Any] = self.pos_map[item]
__snake_case :Optional[Any] = [item, self.key(a__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(a__ )
self._heapify_down(a__ )
def __lowercase ( self , a__ ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
__snake_case :Dict = self.pos_map[item]
del self.pos_map[item]
__snake_case :Dict = self.arr[self.size - 1]
__snake_case :List[str] = index
self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(a__ )
self._heapify_down(a__ )
def __lowercase ( self , a__ , a__ ) -> None:
'''simple docstring'''
__snake_case :Optional[int] = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(a__ )] )
else:
__snake_case :Optional[Any] = [item, self.key(a__ )]
__snake_case :str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowercase ( self ) -> tuple | None:
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowercase ( self ) -> tuple | None:
'''simple docstring'''
__snake_case :Optional[Any] = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
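# Usage note (descriptive): the class above is a keyed min-heap with O(1)
# position lookups via pos_map; a negated key function turns it into a
# max-heap, and updates/deletions stay O(log n) because each item's index
# is tracked in pos_map.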
def UpperCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 455
| 1
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def __lowercase ( _a , _a , _a ):
snake_case_ : str = [0] * no_of_processes
snake_case_ : Optional[Any] = [0] * no_of_processes
    # Initialize remaining_time to the burst time of each process.
for i in range(_a ):
snake_case_ : Union[str, Any] = burst_time[i]
snake_case_ : list[int] = []
snake_case_ : Union[str, Any] = 0
snake_case_ : Dict = 0
    # While processes remain incomplete, every process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process; the shortest process in ready_process (target_process)
    # is executed next.
while completed != no_of_processes:
snake_case_ : int = []
snake_case_ : Union[str, Any] = -1
for i in range(_a ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_a )
if len(_a ) > 0:
snake_case_ : Union[str, Any] = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case_ : Any = i
total_time += burst_time[target_process]
completed += 1
snake_case_ : Tuple = 0
snake_case_ : Tuple = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __lowercase ( _a , _a , _a ):
snake_case_ : Dict = [0] * no_of_processes
for i in range(_a ):
snake_case_ : int = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
lowercase__ : Tuple = 4
lowercase__ : List[Any] = [2, 5, 3, 7]
lowercase__ : List[str] = [0, 0, 0, 0]
lowercase__ : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowercase__ : Any = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
f'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(f'\nAverage waiting time = {mean(waiting_time):.5f}')
print(f'Average turnaround time = {mean(turn_around_time):.5f}')
| 485
|
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __lowercase ( _a , _a ):
    # Applies the Gaussian function elementwise to a matrix.
snake_case_ : Dict = math.sqrt(_a )
snake_case_ : Tuple = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __lowercase ( _a , _a , _a , _a ):
snake_case_ : Optional[int] = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __lowercase ( _a , _a ):
# Creates a gaussian kernel of given dimension.
snake_case_ : Union[str, Any] = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _a ):
for j in range(0 , _a ):
snake_case_ : Union[str, Any] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_a , _a )
def __lowercase ( _a , _a , _a , _a , ):
snake_case_ : int = np.zeros(img.shape )
snake_case_ : str = get_gauss_kernel(_a , _a )
snake_case_, snake_case_ : Any = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
snake_case_ : List[Any] = get_slice(_a , _a , _a , _a )
snake_case_ : Union[str, Any] = img_s - img_s[kernel_size // 2, kernel_size // 2]
snake_case_ : Optional[Any] = vec_gaussian(_a , _a )
snake_case_ : Dict = np.multiply(_a , _a )
snake_case_ : Dict = np.multiply(_a , _a )
snake_case_ : List[Any] = np.sum(_a ) / np.sum(_a )
snake_case_ : Dict = val
return imga
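# Note on the loop above (descriptive): each output pixel is a normalized,
# weighted average of its kernel_size window, where the weight is the product
# of a fixed spatial Gaussian kernel and a per-window intensity Gaussian, so
# large intensity jumps receive low weight and edges are preserved.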
def __lowercase ( _a ):
snake_case_ : Optional[int] = args[1] if args[1:] else '''../image_data/lena.jpg'''
snake_case_ : int = float(args[2] ) if args[2:] else 1.0
snake_case_ : Dict = float(args[3] ) if args[3:] else 1.0
if args[4:]:
snake_case_ : Optional[Any] = int(args[4] )
snake_case_ : Union[str, Any] = kernel_size + abs(kernel_size % 2 - 1 )
else:
snake_case_ : Union[str, Any] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ : Tuple = parse_args(sys.argv)
lowercase__ : str = cva.imread(filename, 0)
cva.imshow('''input image''', img)
lowercase__ : Tuple = img / 2_55
lowercase__ : Optional[int] = out.astype('''float32''')
lowercase__ : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowercase__ : List[str] = out * 2_55
lowercase__ : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 485
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : List[str] =VOCAB_FILES_NAMES
__lowerCAmelCase : Union[str, Any] =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Any =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : List[str] =VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[int] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Any =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Dict =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_SCREAMING_SNAKE_CASE = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_SCREAMING_SNAKE_CASE = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __call__( self :Optional[Any], snake_case :List[Any], snake_case :Optional[str] = None, snake_case :Optional[str] = None, snake_case :Union[bool, str] = False, snake_case :Union[bool, str] = False, snake_case :Optional[int] = None, snake_case :Optional[Union[str, TensorType]] = None, snake_case :Optional[bool] = None, **snake_case :Any, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
snake_case, padding=snake_case, truncation=snake_case, max_length=snake_case, return_tensors=snake_case, return_attention_mask=snake_case, **snake_case, )
elif titles is None or texts is None:
_lowercase =titles if texts is None else texts
return super().__call__(
snake_case, snake_case, padding=snake_case, truncation=snake_case, max_length=snake_case, return_tensors=snake_case, return_attention_mask=snake_case, **snake_case, )
_lowercase =titles if not isinstance(snake_case, snake_case) else [titles]
_lowercase =texts if not isinstance(snake_case, snake_case) else [texts]
_lowercase =len(snake_case)
_lowercase =questions if not isinstance(snake_case, snake_case) else [questions] * n_passages
if len(snake_case) != len(snake_case):
raise ValueError(
f'''There should be as many titles than texts but got {len(snake_case)} titles and {len(snake_case)} texts.''')
_lowercase =super().__call__(snake_case, snake_case, padding=snake_case, truncation=snake_case)['input_ids']
_lowercase =super().__call__(snake_case, add_special_tokens=snake_case, padding=snake_case, truncation=snake_case)['input_ids']
_lowercase ={
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(snake_case, snake_case)
]
}
if return_attention_mask is not False:
_lowercase =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_lowercase =attention_mask
return self.pad(snake_case, padding=snake_case, max_length=snake_case, return_tensors=snake_case)
def UpperCamelCase__ ( self :Union[str, Any], snake_case :BatchEncoding, snake_case :DPRReaderOutput, snake_case :int = 16, snake_case :int = 64, snake_case :int = 4, ):
"""simple docstring"""
_lowercase =reader_input['input_ids']
_lowercase , _lowercase , _lowercase =reader_output[:3]
_lowercase =len(snake_case)
_lowercase =sorted(range(snake_case), reverse=snake_case, key=relevance_logits.__getitem__)
_lowercase =[]
for doc_id in sorted_docs:
_lowercase =list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_lowercase =sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowercase =sequence_ids.index(self.pad_token_id)
else:
_lowercase =len(snake_case)
_lowercase =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=snake_case, top_spans=snake_case, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=snake_case, start_index=snake_case, end_index=snake_case, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
if len(snake_case) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase__ ( self :Tuple, snake_case :List[int], snake_case :List[int], snake_case :int, snake_case :int, ):
"""simple docstring"""
_lowercase =[]
for start_index, start_score in enumerate(snake_case):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
        _lowercase =sorted(snake_case, key=lambda snake_case: snake_case[1], reverse=snake_case)
_lowercase =[]
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''')
_lowercase =end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(snake_case) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class SCREAMING_SNAKE_CASE_ ( _a , _a ):
"""simple docstring"""
__lowerCAmelCase : List[str] =VOCAB_FILES_NAMES
__lowerCAmelCase : str =READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : int =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Tuple =READER_PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] =['''input_ids''', '''attention_mask''']
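# Hypothetical usage sketch (class names above are obfuscated; in transformers
# the final class corresponds to DPRReaderTokenizer):
#   tok = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   enc = tok(questions="who wrote hamlet?",
#             titles=["Hamlet", "Macbeth"],
#             texts=["Hamlet is a tragedy ...", "Macbeth is a tragedy ..."],
#             padding=True, return_tensors="pt")
#   # enc["input_ids"] has shape (n_passages, sequence_length)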
| 181
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =0
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =CLIPConfig()
# Create a dummy config file with image_proceesor_type
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_lowercase =AutoImageProcessor.from_pretrained(snake_case).to_dict()
config_dict.pop('image_processor_type')
_lowercase =CLIPImageProcessor(**snake_case)
# save in new folder
model_config.save_pretrained(snake_case)
config.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
# make sure private variable is not incorrectly saved
_lowercase =json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, 'clip-base is not a local folder and is not a valid model identifier'):
_lowercase =AutoImageProcessor.from_pretrained('clip-base')
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
_lowercase =AutoImageProcessor.from_pretrained(snake_case, revision='aaaaaa')
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
with self.assertRaises(snake_case):
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case):
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case, trust_remote_code=snake_case)
self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case):
AutoImageProcessor.register(snake_case, snake_case)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =CustomImageProcessor.from_pretrained(snake_case)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self :str):
"""simple docstring"""
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =True
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# If remote code is not set, the default is to use local
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(not hasattr(snake_case, 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
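# A minimal sketch of the registration pattern the tests above exercise.
# Assumes a `transformers` version contemporary with this test file; `MyConfig`
# and `MyImageProcessor` are hypothetical names used only for illustration.
#
#     from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
#     from transformers.image_processing_utils import BaseImageProcessor
#
#     class MyConfig(PretrainedConfig):
#         model_type = "my-model"
#
#     class MyImageProcessor(BaseImageProcessor):
#         pass
#
#     AutoConfig.register("my-model", MyConfig)
#     AutoImageProcessor.register(MyConfig, MyImageProcessor)
#     # AutoImageProcessor.from_pretrained(...) can now resolve "my-model" configs.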
| 181
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
lowercase = "upernet"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=[1, 2, 3, 6] , UpperCamelCase__=True , UpperCamelCase__=0.4 , UpperCamelCase__=384 , UpperCamelCase__=256 , UpperCamelCase__=1 , UpperCamelCase__=False , UpperCamelCase__=255 , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = backbone_config.get("""model_type""" )
A_ = CONFIG_MAPPING[backbone_model_type]
A_ = config_class.from_dict(UpperCamelCase__ )
A_ = backbone_config
A_ = hidden_size
A_ = initializer_range
A_ = pool_scales
A_ = use_auxiliary_head
A_ = auxiliary_loss_weight
A_ = auxiliary_in_channels
A_ = auxiliary_channels
A_ = auxiliary_num_convs
A_ = auxiliary_concat_input
A_ = loss_ignore_index
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = copy.deepcopy(self.__dict__ )
A_ = self.backbone_config.to_dict()
A_ = self.__class__.model_type
return output
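# Usage sketch (assumes the `transformers` package is installed, where this
# configuration ships as `UperNetConfig`; a ConvNeXt backbone is just one
# possible choice):
#
#     from transformers import ConvNextConfig, UperNetConfig
#
#     backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#     config = UperNetConfig(backbone_config=backbone)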
| 667
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
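# A minimal usage sketch of the lock API exercised above ("demo.lock" is an
# arbitrary path chosen for illustration):
def _filelock_demo(tmp_path):
    lock = FileLock(str(tmp_path / "demo.lock"))
    with lock.acquire(timeout=1):
        # The lock is held here; another FileLock on the same path would
        # raise Timeout if it called .acquire() with a short timeout.
        pass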
| 667
| 1
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Convert each lowercase letter to its 1-26 alphabet position."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Convert 1-26 alphabet positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded: ", decode(encoded))


if __name__ == "__main__":
    main()
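# Round-trip sanity check: "abc" maps to [1, 2, 3] because ord("a") == 97
# and both functions offset by 96.
assert encode("abc") == [1, 2, 3]
assert decode([8, 9]) == "hi"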
| 52
|
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of nums."""
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0        # best sum that excludes the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
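# Worked examples: for [1, 2, 3] the best non-adjacent picks are 1 and 3 (sum 4);
# for [1, 5, 3, 7, 2, 2, 6] they are 5, 7 and 6 (sum 18).
assert maximum_non_adjacent_sum([1, 2, 3]) == 4
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18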
| 52
| 1
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs fully match some window of the strings in ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
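# Quick illustration of the matcher above: the query tuple is a sequence of
# regexes that must fully match some contiguous window of a parameter-name tuple.
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "attn", "c_fc", "kernel"))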
| 700
|
'''simple docstring'''
def join(separator: str, separated: list[str]) -> str:
    """Join strings with the given separator, stripping a leading/trailing separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
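# Example: joining with a space; the trailing separator is stripped.
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"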
| 43
| 0
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCAmelCase = HUGGINGFACE_HUB_CACHE
lowerCAmelCase = """config.json"""
lowerCAmelCase = """diffusion_pytorch_model.bin"""
lowerCAmelCase = """diffusion_flax_model.msgpack"""
lowerCAmelCase = """model.onnx"""
lowerCAmelCase = """diffusion_pytorch_model.safetensors"""
lowerCAmelCase = """weights.pb"""
lowerCAmelCase = """https://huggingface.co"""
lowerCAmelCase = default_cache_path
lowerCAmelCase = """diffusers_modules"""
lowerCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCAmelCase = ["""fp16""", """non-ema"""]
lowerCAmelCase = """.self_attn"""
| 525
|
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-wise source data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize each column to [0, 1]; weight 0 inverts the score, weight 1 keeps it."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            raise ValueError(f"Invalid weight of {weight:f} provided")
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one combined score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
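# Worked example (hypothetical price/mileage/year table): weight 0 means lower
# column values score better, weight 1 means higher is better.
vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
ranked = procentual_proximity(vehicles, [0, 0, 1])
assert len(ranked[0]) == 4  # a combined score is appended to each row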
| 525
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : List[Any] ="""bridgetower_vision_model"""
def __init__( self , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=288 , lowerCAmelCase__=1 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , **lowerCAmelCase__ , ):
super().__init__(**lowerCAmelCase__ )
_A= hidden_size
_A= num_hidden_layers
_A= num_channels
_A= patch_size
_A= image_size
_A= initializer_factor
_A= layer_norm_eps
_A= stop_gradient
_A= share_layernorm
_A= remove_last_layer
@classmethod
def a__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ):
_A, _A= cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get('model_type' ) == "bridgetower":
_A= config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : List[Any] ="""bridgetower_text_model"""
def __init__( self , lowerCAmelCase__=50265 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=1 , lowerCAmelCase__=3072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=514 , lowerCAmelCase__=1 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
super().__init__(**lowerCAmelCase__ )
_A= vocab_size
_A= hidden_size
_A= num_hidden_layers
_A= num_attention_heads
_A= hidden_act
_A= initializer_factor
_A= intermediate_size
_A= hidden_dropout_prob
_A= attention_probs_dropout_prob
_A= max_position_embeddings
_A= type_vocab_size
_A= layer_norm_eps
_A= position_embedding_type
_A= use_cache
_A= pad_token_id
_A= bos_token_id
_A= eos_token_id
@classmethod
def a__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ):
_A, _A= cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get('model_type' ) == "bridgetower":
_A= config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : Dict ="""bridgetower"""
def __init__( self , lowerCAmelCase__=True , lowerCAmelCase__="gelu" , lowerCAmelCase__=768 , lowerCAmelCase__=1 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=False , lowerCAmelCase__="add" , lowerCAmelCase__=12 , lowerCAmelCase__=6 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
# TODO: remove this once the Hub files are updated.
_A= kwargs.pop('text_config_dict' , lowerCAmelCase__ )
_A= kwargs.pop('vision_config_dict' , lowerCAmelCase__ )
super().__init__(**lowerCAmelCase__ )
_A= share_cross_modal_transformer_layers
_A= hidden_act
_A= hidden_size
_A= initializer_factor
_A= layer_norm_eps
_A= share_link_tower_layers
_A= link_tower_type
_A= num_attention_heads
_A= num_hidden_layers
_A= tie_word_embeddings
_A= init_layernorm_from_vision_encoder
if text_config is None:
_A= {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
_A= {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
_A= BridgeTowerTextConfig(**lowerCAmelCase__ )
_A= BridgeTowerVisionConfig(**lowerCAmelCase__ )
@classmethod
def a__ ( cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase__ )
def a__ ( self ):
_A= copy.deepcopy(self.__dict__ )
_A= self.text_config.to_dict()
_A= self.vision_config.to_dict()
_A= self.__class__.model_type
return output
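# Usage sketch (assumes the `transformers` package is installed, where these
# classes ship as BridgeTowerTextConfig / BridgeTowerVisionConfig /
# BridgeTowerConfig):
#
#     from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
#
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)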
| 476
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
UpperCAmelCase_ = True
except (ImportError, AttributeError):
UpperCAmelCase_ = object
def Body(*args, **kwargs) -> List[Any]:
    '''simple docstring'''
    pass
UpperCAmelCase_ = False
UpperCAmelCase_ = logging.get_logger('''transformers-cli/serving''')
def UpperCamelCase ( lowerCAmelCase_ ) -> int:
'''simple docstring'''
_A= pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowerCAmelCase_ , args.host , args.port , args.workers )
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : dict
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : List[str]
_SCREAMING_SNAKE_CASE : Optional[List[int]]
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : str
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : Any
class lowerCAmelCase ( _a ):
@staticmethod
def a__ ( lowerCAmelCase__ ):
_A= parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=lowerCAmelCase__ , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=lowerCAmelCase__ , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=lowerCAmelCase__ , default=8888 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=lowerCAmelCase__ , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=lowerCAmelCase__ , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=lowerCAmelCase__ , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=lowerCAmelCase__ , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=lowerCAmelCase__ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=lowerCAmelCase__ )
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_A= pipeline
_A= host
_A= port
_A= workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f"Serving model over {host}:{port}" )
_A= FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['POST'] , ),
] , timeout=600 , )
def a__ ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def a__ ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def a__ ( self , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) ):
try:
_A= self._pipeline.tokenizer.tokenize(lowerCAmelCase__ )
if return_ids:
_A= self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
return ServeTokenizeResult(tokens=lowerCAmelCase__ , tokens_ids=lowerCAmelCase__ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(lowerCAmelCase__ )} )
def a__ ( self , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , ):
try:
_A= self._pipeline.tokenizer.decode(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return ServeDeTokenizeResult(model='' , text=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(lowerCAmelCase__ )} )
async def a__ ( self , lowerCAmelCase__=Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) ):
# Check we don't have empty string
if len(lowerCAmelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_A= self._pipeline(lowerCAmelCase__ )
return ServeForwardResult(output=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(500 , {'error': str(lowerCAmelCase__ )} )
| 476
| 1
|
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number in data_file whose `a,x` pair maximizes a**x."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        # compare x * log10(a) rather than a**x itself, so huge powers are never built
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
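# Sanity check of the comparison trick used above: x * log10(a) is monotonic
# in a**x, so the huge integers never need to be materialized.
assert 2 * log10(1000) > 5 * log10(10)  # 1000**2 = 10**6 > 10**5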
| 618
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase__ :List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class __a :
_a : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
_a : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_a : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The column name of the images in the files.'} )
_a : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the training data.'} )
_a : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the validation data.'} )
_a : Optional[float] = field(
default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
_a : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_a : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = {}
if self.train_dir is not None:
_UpperCAmelCase = self.train_dir
if self.validation_dir is not None:
_UpperCAmelCase = self.validation_dir
_UpperCAmelCase = data_files if data_files else None
@dataclass
class __a :
_a : str = field(
default=UpperCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
_a : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
_a : Optional[str] = field(
default=UpperCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
_a : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_a : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_a : str = field(default=UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} )
_a : bool = field(
default=UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_a : float = field(
default=0.7_5 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
_a : bool = field(
default=UpperCAmelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class __a ( UpperCAmelCase ):
_a : float = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def lowerCAmelCase__ ( a__: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , a__ , a__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(a__ )
transformers.utils.logging.set_verbosity(a__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
_UpperCAmelCase = ds['train'].train_test_split(data_args.train_val_split )
_UpperCAmelCase = split['train']
_UpperCAmelCase = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **a__ )
elif model_args.model_name_or_path:
_UpperCAmelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **a__ )
else:
_UpperCAmelCase = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_UpperCAmelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **a__ )
elif model_args.model_name_or_path:
_UpperCAmelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **a__ )
else:
_UpperCAmelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_UpperCAmelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = ViTMAEForPreTraining(a__ )
if training_args.do_train:
_UpperCAmelCase = ds['train'].column_names
else:
_UpperCAmelCase = ds['validation'].column_names
if data_args.image_column_name is not None:
_UpperCAmelCase = data_args.image_column_name
elif "image" in column_names:
_UpperCAmelCase = 'image'
elif "img" in column_names:
_UpperCAmelCase = 'img'
else:
_UpperCAmelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_UpperCAmelCase = image_processor.size['shortest_edge']
else:
_UpperCAmelCase = (image_processor.size['height'], image_processor.size['width'])
_UpperCAmelCase = Compose(
[
Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(a__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(examples ):
    """Preprocess a batch of images by applying the transforms."""
    examples['pixel_values'] = [transforms(image ) for image in examples[image_column_name]]
    return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_UpperCAmelCase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(a__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_UpperCAmelCase = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(a__ )
# Compute absolute learning rate
_UpperCAmelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_UpperCAmelCase = training_args.base_learning_rate * total_train_batch_size / 2_5_6
# Initialize our trainer
_UpperCAmelCase = Trainer(
model=a__ , args=a__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=a__ , data_collator=a__ , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('eval' , a__ )
trainer.save_metrics('eval' , a__ )
# Write model card and (optionally) push to hub
_UpperCAmelCase = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__ )
else:
trainer.create_model_card(**a__ )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
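# Example invocation of this script (illustrative values; the flags map to the
# dataclass fields defined above):
#
#     python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#         --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75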
| 618
| 1
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = 13
lowercase__ = 7
lowercase__ = True
lowercase__ = True
lowercase__ = False
lowercase__ = True
lowercase__ = 99
lowercase__ = 32
lowercase__ = 2
lowercase__ = 4
lowercase__ = 37
lowercase__ = '''gelu'''
lowercase__ = 0.1
lowercase__ = 0.1
lowercase__ = 512
lowercase__ = 16
lowercase__ = 2
lowercase__ = 0.02
lowercase__ = 3
lowercase__ = 4
lowercase__ = None
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : int, lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = TFDistilBertModel(config=UpperCamelCase__ )
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase__ = model(UpperCamelCase__ )
lowercase__ = [input_ids, input_mask]
lowercase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int, lowerCamelCase : Any, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = TFDistilBertForMaskedLM(config=UpperCamelCase__ )
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = TFDistilBertForQuestionAnswering(config=UpperCamelCase__ )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFDistilBertForSequenceClassification(UpperCamelCase__ )
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : str, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = TFDistilBertForMultipleChoice(UpperCamelCase__ )
lowercase__ = tf.tile(tf.expand_dims(UpperCamelCase__, 1 ), (1, self.num_choices, 1) )
lowercase__ = tf.tile(tf.expand_dims(UpperCamelCase__, 1 ), (1, self.num_choices, 1) )
lowercase__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any, lowerCamelCase : int, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFDistilBertForTokenClassification(UpperCamelCase__ )
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
lowercase__ = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = TFDistilBertModelTester(self )
lowercase__ = ConfigTester(self, config_class=UpperCamelCase__, dim=37 )
def lowercase__ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCamelCase__ )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCamelCase__ )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCamelCase__ )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCamelCase__ )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCamelCase__ )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCamelCase__ )
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase__ = TFDistilBertModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase__ = model(UpperCamelCase__ )[0]
lowercase__ = [1, 6, 768]
self.assertEqual(output.shape, UpperCamelCase__ )
lowercase__ = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3], UpperCamelCase__, atol=1E-4 )
| 700
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
    '''simple docstring'''
    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1 is not None:
        output += token_ids_1 + [self.sep_token_id]
    return output

def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
    '''simple docstring'''
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
    '''simple docstring'''
    files = self._tokenizer.model.save(save_directory, name=filename_prefix )
    return tuple(files )
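# Illustration of the layouts built above (requires downloading a checkpoint,
# so shown only as a sketch):
#
#     from transformers import BertTokenizerFast
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     enc = tok("hello", "world")  # [CLS] hello [SEP] world [SEP]
#     # token_type_ids: 0s for the first segment, 1s for the second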
| 671
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_UpperCamelCase : Optional[int] =None
_UpperCamelCase : List[str] =logging.get_logger(__name__)
_UpperCamelCase : Optional[Any] ="▁"
_UpperCamelCase : Optional[Any] ={"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_UpperCamelCase : str ={
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_UpperCamelCase : int ={
"google/pegasus-xsum": 5_12,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" PEGASUS tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
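# A minimal sketch of the default special-token layout produced by __init__ above
# when no additional_special_tokens are passed (values taken from the defaults in
# this file; nothing here comes from a saved tokenizer config):
# ["<mask_1>", "<unk_2>", "<unk_3>", ..., "<unk_102>"] -- 102 entries in total.
if __name__ == "__main__":
    example_extras = ["<mask_1>"] + [f"<unk_{i}>" for i in range(2, 103)]
    assert len(example_extras) == 102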
| 316
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
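# Sketch of what the _LazyModule wiring above buys (illustrative, not part of the
# original file): attribute access triggers the heavy import only on first use.
#
#   from transformers.models import biogpt
#   model_cls = biogpt.BioGptForCausalLM  # modeling_biogpt is imported here, lazily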
| 117
| 0
|
'''simple docstring'''
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
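# Minimal usage sketch. The ListNode class and build_list helper are assumptions
# for illustration -- the file itself never defines a node type.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = current = ListNode(values[0])
    for value in values[1:]:
        current.next = ListNode(value)
        current = current.next
    return head


if __name__ == "__main__":
    # is_palindrome mutates the list (it severs and reverses the second half),
    # so each check gets a freshly built list.
    assert is_palindrome(build_list([1, 2, 2, 1]))
    assert is_palindrome_stack(build_list([1, 2, 3, 2, 1]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))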
| 720
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 695
| 0
|
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
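# Sanity sketch: with a flat universe (densities summing to 1) and redshift 0,
# E(z) = 1 and the function returns the Hubble constant itself.
assert abs(hubble_parameter(68.3, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0) - 68.3) < 1e-9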
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 142
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" CamemBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
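# Illustration of the layouts built above (token ids spelled as symbols):
#   single sequence:   <s> A </s>             -> token_type_ids: all 0
#   pair of sequences: <s> A </s></s> B </s>  -> token_type_ids: all 0
# CamemBERT, like RoBERTa, does not make use of token type ids.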
| 142
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 162
|
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
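# Usage sketch (sizes are illustrative, not from the original training script):
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    logits = head(torch.randn(2, 768))  # project a batch of embeddings to class logits
    assert logits.shape == (2, 5)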
| 162
| 1
|
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 334
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
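# Quick sanity sketch for the pure converters above (the interactive helpers
# need a terminal, so they are not exercised here):
if __name__ == "__main__":
    assert _convert_yes_no_to_bool("YES") is True
    assert _convert_yes_no_to_bool("no") is False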
| 334
| 1
|
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
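# Determinism sketch: a fixed seed drives both the state preparation and the
# simulator, so the sifted key is reproducible (kept commented -- running the
# Aer simulator twice at import time would be slow):
# assert bb84(8, seed=0) == bb84(8, seed=0)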
| 699
|
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
| 699
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Union[str, Any]=False ):
'''simple docstring'''
UpperCamelCase_ : int =copy.deepcopy(__lowerCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
UpperCamelCase_ : Optional[Any] =torch.ones((self.model_tester.num_masks,) )
UpperCamelCase_ : str =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
UpperCamelCase_ : Union[str, Any] =mask.expand(self.model_tester.batch_size , -1 ).bool()
UpperCamelCase_ : List[str] =bool_masked_pos.to(__lowerCamelCase )
if return_labels:
if model_class in [
*get_values(__lowerCamelCase ),
]:
UpperCamelCase_ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Optional[int] =model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ : int =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Dict =model_class(__lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Dict =[*signature.parameters.keys()]
UpperCamelCase_ : Dict =['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
@slow
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : List[str] =VideoMAEModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
UpperCamelCase_ , UpperCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : Union[str, Any] =True
for model_class in self.all_model_classes:
UpperCamelCase_ : Any =self.model_tester.seq_length - self.model_tester.num_masks
UpperCamelCase_ : str =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
UpperCamelCase_ : Tuple =True
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Optional[int] =True
UpperCamelCase_ : List[str] =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : Tuple =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCamelCase_ : Any =outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase_ : Union[str, Any] =True
UpperCamelCase_ : Tuple =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : Tuple =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCamelCase_ : List[Any] =outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
UpperCamelCase_ : int =len(__lowerCamelCase )
# Check attention is always last and order is fine
UpperCamelCase_ : Optional[int] =True
UpperCamelCase_ : Union[str, Any] =True
UpperCamelCase_ : List[Any] =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : Any =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
UpperCamelCase_ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase :List[Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :int ):
UpperCamelCase_ : int =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : Tuple =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCamelCase_ : Dict =outputs.hidden_states
UpperCamelCase_ : Any =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =self.model_tester.seq_length - self.model_tester.num_masks
UpperCamelCase_ : Tuple =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
UpperCamelCase_ , UpperCamelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[Any] =True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ : Optional[Any] =True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
pass
def prepare_video():
    video = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 357
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ):
UpperCAmelCase = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
def _config_zero_init(__lowerCamelCase : Any ):
UpperCAmelCase = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-1_0 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
UpperCAmelCase = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 377
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase : Dict = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess an image or a batch of images: optionally resize, rescale and normalize."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
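
# With ConvNeXT-style defaults (assumed here, since they are not visible in this
# excerpt: size={"shortest_edge": 224} and crop_pct = 224 / 256 = 0.875), the
# resize path above first rescales the shortest edge to int(224 / 0.875) = 256
# and then center-crops back to 224x224; at shortest_edge >= 384 the image is
# instead warped straight to (shortest_edge, shortest_edge) with no crop.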
| 647
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
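
if __name__ == "__main__":
    # Minimal standalone sketch of the sampling loop the tests above exercise
    # (illustrative only: a zero tensor stands in for a real UNet's noise
    # prediction, and running this file directly requires the relative
    # `.test_schedulers` import above to resolve).
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # placeholder model output
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])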
| 647
| 1
|
def join(separator: str, separated: list[str]) -> str:
    """Join a list of strings with the given separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
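    # A couple of hedged sanity checks (expected values derived from the
    # implementation above, not from upstream documentation):
    print(join("", ["a", "b", "c", "d"]))   # abcd
    print(join("#", ["a", "b", "c", "d"]))  # a#b#c#d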
| 474
|
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of `sequence` via a state-space-tree traversal."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """At each index, branch on first excluding and then including sequence[index]."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip the current element.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include the current element, recurse, then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
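    # Sanity note (derived from the recursion above): a sequence of length n yields
    # exactly 2**n printed subsequences, so ["A", "B", "C"] prints 8 lines, from []
    # (the all-exclude branch finishes first) to ['A', 'B', 'C'] (all-include last).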
| 474
| 1
|
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given elements and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node | None) -> None:
    """Print the list's elements in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
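    # Expected output of main() (derived from the code above):
    #   Linked List:
    #   14->52->14->12->43
    #   Elements in Reverse:
    #   43, 12, 14, 52, 14 -- printed one element per line by the recursion.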
| 303
|
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
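
# `_LazyModule` replaces this module in `sys.modules`, so the heavy torch-backed
# imports (e.g. `MMBTModel`) only run on first attribute access, while the
# `TYPE_CHECKING` branch keeps the same names visible to static type checkers.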
| 303
| 1