| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy: its digits are neither entirely
    non-decreasing nor entirely non-increasing (Project Euler 112).

    >>> check_bouncy(6086)
    True
    >>> check_bouncy(123456)
    False
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    first reaches ``percent`` percent.

    >>> solution(50)
    538
    >>> solution(90)
    21780
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 77
|
"""simple docstring"""
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes by their data values
    def swap_nodes(self, node_data_1: Any, node_data_2: Any):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            # swap the data payloads of the two located nodes
            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
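    # Extra sanity check (illustrative, not in the original demo): swapping a
    # missing value, or a value with itself, is a no-op, so the list prints
    # unchanged.
    ll.swap_nodes(0, 9)
    ll.print_list()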
| 77
| 1
|
'''Lazy import structure for the BlenderbotSmall model family.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
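# Illustrative usage sketch (assumes the surrounding `transformers` package
# layout): with the _LazyModule registration above, framework-heavy
# submodules are imported only when one of their attributes is first touched:
#
#     from transformers import BlenderbotSmallConfig  # cheap, config only
#     from transformers import BlenderbotSmallModel   # triggers the torch-side import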
| 187
|
'''Greedy solution to the fractional knapsack problem.'''
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Maximise the value that fits into ``capacity``, allowing fractional items.

    Items are taken greedily in decreasing order of value/weight ratio;
    returns the total value and the fraction taken of each item.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
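    # Quick sanity check on the classic instance (illustrative values): items
    # worth (60, 100, 120) with weights (10, 20, 30) and capacity 50 give
    # 240.0 -- items 1 and 2 taken whole plus two thirds of item 3.
    assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50)[0] == 240.0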
| 187
| 1
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 409
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 79
| 0
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
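# For example (URLs shown for illustration only):
#     hf_bucket_url("bert-base-uncased", "config.yaml")
#         -> "https://cdn.huggingface.co/bert-base-uncased-config.yaml"      (legacy flat layout)
#     hf_bucket_url("user/model", "config.yaml", use_cdn=False)
#         -> "https://s3.amazonaws.com/models.huggingface.co/bert/user/model/config.yaml"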
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
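# Minimal sketch of the batching helper above: `chunk` lazily yields
# successive batch-sized slices of a sequence, e.g.
#
#     list(chunk([1, 2, 3, 4, 5], batch=2))  # -> [[1, 2], [3, 4], [5]]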
| 626
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 626
| 1
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
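# For example (illustrative values): if a batch was padded to length 4 with
# pad_token_id=0 but the longest real sequence has length 2, trim_batch drops
# the two all-pad columns:
#
#     ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#     trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 6], [7, 0]])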
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """Pickle `obj` to `path`."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
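# For instance, f1_score("a cat sat", "the cat sat!") normalises both sides
# to "cat sat" (articles and punctuation are stripped), so the token overlap
# is 2 out of 2 on each side and the returned F1 is 1.0.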
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 473
|
"""simple docstring"""
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursive backtracking helper for the open knight's tour."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight's tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
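    # Small demonstration (not part of the original module): print one open
    # tour of a 5x5 board, where each cell holds the move number on which the
    # knight visits it. Sizes with no tour raise ValueError instead.
    for row in open_knight_tour(5):
        print(row)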
| 473
| 1
|
'''Image processor that rescales images and symmetrically pads them to a
multiple of ``pad_size`` (as used for Swin2SR-style super-resolution).'''
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format=None) -> np.ndarray:
        # Pad the bottom and right edges up to the next multiple of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
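# Example of the padding arithmetic above (illustrative sizes): a 17x21 image
# with pad_size=8 is padded on the bottom/right up to the next multiples of 8,
# i.e. (17 // 8 + 1) * 8 = 24 rows and (21 // 8 + 1) * 8 = 24 columns.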
| 703
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Creates a set of train, validation, and test `DataLoader`s for a particular cross-validation fold,
    using the fold indices to carve the base `train` split into per-fold train/validation subsets.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
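
# A minimal standalone sketch of the fold-averaging ("soft voting") performed in training_function
# above: each fold contributes a [num_examples, num_classes] logits tensor; stacking, summing and
# taking the argmax averages the folds' votes. Shapes here are illustrative.
#
#   import torch
#   fold_logits = [torch.randn(8, 2) for _ in range(3)]  # pretend logits from 3 folds
#   ensembled_preds = torch.stack(fold_logits, dim=0).sum(dim=0).argmax(dim=-1)
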
"""
Count token occurrences in a binarized dataset, used to smooth the MLM masking probabilities.
"""
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
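
# Hypothetical follow-up (not part of this script): one common way to turn such raw counts into
# smoothed masking probabilities, XLM/word2vec-style, is an inverse power law. The 0.7 exponent
# below is an illustrative choice, not a value taken from this repository.
#
#   import numpy as np
#   smoothed = np.maximum(np.array(counts), 1) ** -0.7   # rarer tokens get relatively more mass
#   token_probs = smoothed / smoothed.sum()
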
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
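
# Typical downstream use of the schedulers re-exported above: swap a pipeline's scheduler via
# `from_config`, which reuses the pipeline's existing scheduler configuration. The model id is
# illustrative.
#
#   from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
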
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
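
# For context (a sketch, not part of the test): the (name, options) tuple returned by
# `gpu_provider` above is the form onnxruntime accepts in its `providers` argument,
# e.g. with a hypothetical model path:
#
#   sess = ort.InferenceSession("model.onnx", sess_options=options, providers=[provider_tuple])
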
def optimal_merge_pattern(files: list) -> float:
    """Merge the files in pairs of two, always picking the two cheapest,
    and return the minimum total merge cost.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
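
# A heap-based variant of the same greedy algorithm (a sketch, not part of the original file):
# heapq replaces the repeated linear min-scans, giving O(n log n) overall.
import heapq


def optimal_merge_pattern_heap(files: list) -> int:
    heapq.heapify(files)
    total_cost = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)  # merge the two cheapest files
        total_cost += merged
        heapq.heappush(files, merged)
    return total_cost


# optimal_merge_pattern_heap([2, 3, 4]) == 14, matching the doctest above:
# 2 + 3 = 5 (cost 5), then 4 + 5 = 9 (cost 9), for a total of 14.
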
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
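
# Quick illustration of what the class above verifies: the same framework-agnostic helper accepts
# NumPy arrays as well as torch/tf/jax tensors with one call signature. A minimal sketch:
#
#   import numpy as np
#   from transformers.utils import transpose
#   x = np.random.randn(3, 4)
#   assert np.allclose(transpose(x), x.T)
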
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
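
# With the lazy structure above, importing the package stays cheap: the heavy torch/flax-backed
# submodules are only materialized when an attribute is first accessed, e.g.
#
#   from transformers import LongT5Model  # resolved lazily via _LazyModule on first access
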
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
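
# Minimal usage sketch of the Extractor facade above (paths are hypothetical):
#
#   archive, target = "path/to/data.tar.gz", "path/to/extracted"
#   fmt = Extractor.infer_extractor_format(archive)
#   if fmt:
#       Extractor.extract(archive, target, extractor_format=fmt)
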
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    r"""
    Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # This method forwards all its arguments to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # This method forwards all its arguments to the tokenizer's `decode`.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
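
# Minimal usage sketch ("openai/clip-vit-base-patch32" is the standard public checkpoint;
# `image` stands for any PIL image you supply):
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
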
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]


if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a VisualBERT model.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
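
# Config/model round trip, mirroring the usual transformers pattern (a sketch):
#
#   from transformers import VisualBertConfig, VisualBertModel
#   configuration = VisualBertConfig(visual_embedding_dim=512)
#   model = VisualBertModel(configuration)
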
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
Seq2SeqDataCollator,
Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics for a given split (train/val/test).
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
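
# Typical invocation (all paths and values illustrative):
#
#   python finetune_trainer.py \
#       --model_name_or_path t5-small --data_dir ./data --output_dir ./out \
#       --do_train --do_eval --do_predict --predict_with_generate
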
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
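
# Worked example: with the default scale factor of 8, a requested 512x512 canvas maps to
# 512 // 64 = 8 latent cells per side, so downscale_height_and_width(512, 512) == (64, 64);
# a non-multiple such as 520 rounds up to 9 cells, giving 72.
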
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :UNetaDConditionModel , __lowercase :DDPMScheduler , __lowercase :VQModel , ):
super().__init__()
self.register_modules(
unet=__lowercase , scheduler=__lowercase , movq=__lowercase , )
__lowerCamelCase : Any =2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
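
    # Note (editorial): multiplying fresh noise by `scheduler.init_noise_sigma`
    # keeps this method scheduler-agnostic -- DDPM-style schedulers use 1.0 here,
    # while sigma-based schedulers (e.g. Euler) expect the initial latents scaled
    # to the first noise level.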
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                # the unet predicts mean and variance jointly; split them before applying guidance
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 363
| 0
|
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy, i.e. its digits are neither monotonically increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
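
# Worked example (editorial): 409 is bouncy -- its digits fall (4 -> 0) and then
# rise (0 -> 9), so neither sorted("409") == "049" nor its reverse "940" equals
# the original string. 134468 is monotonically increasing, hence not bouncy.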
def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers first reaches ``percent``."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 46
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the list of (old_key, new_key) pairs used to rename checkpoint parameters."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection from the original checkpoint into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
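
# Note (editorial): timm-style ViT checkpoints fuse query/key/value into a single
# qkv projection of shape (3 * hidden_size, hidden_size). The slices above carve
# out rows [0, h), [h, 2h) and [2h, 3h) as the separate query, key and value
# matrices that the HF ViLT implementation expects.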
def remove_classification_head_(state_dict):
    """Drop the original classification head weights, which have no HF counterpart."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT checkpoint weights into our HF ViLT structure.
    """
    # define configuration and instantiate the right HF model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM-RoBERTa-XL model."""

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
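
# Minimal usage sketch (editorial addition, not part of the original module; the
# task name below is an assumption about the ONNX export API):
#
#     config = XLMRobertaXLConfig()  # defaults: hidden_size=2560, 36 layers, 32 heads
#     onnx_config = XLMRobertaXLOnnxConfig(config, task="default")
#     list(onnx_config.inputs)       # -> ["input_ids", "attention_mask"]
#
# Only the "multiple-choice" task adds a "choice" dynamic axis; every other task
# exports with plain (batch, sequence) axes.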
| 284
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 284
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 611
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CamemBERT model."""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 614
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected height and width when providing images to DeformableDetrImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
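
# Worked example (editorial): with shortest_edge=18, a 30x400 (width x height)
# PIL image is expected to resize so the short side becomes 18 and the long side
# scales proportionally: expected_width = 18, expected_height =
# int(18 * 400 / 30) = 240. In the batched case the tester takes the max over
# per-image expectations, mirroring the processor's padding behaviour.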
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 709
|
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
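
# Worked example (editorial): infix_2_prefix("a+b*c") reverses the input to
# "c*b+a", converts that to postfix ("cb*a+"), and reverses again, yielding
# the prefix form "+a*bc".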
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 350
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 690
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 710
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self : int ) -> Any:
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
        image_processor = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop('''image_processor_type''' )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''clip-base is not a local folder and is not a valid model identifier''' ):
            image_processor = AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir )
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_image_processor , CustomImageProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
        class NewImageProcessor( CLIPImageProcessor ):
            """simple docstring"""
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8
| 0
|
"""simple docstring"""
def check_bouncy( _lowerCamelCase ) -> bool:
    '''simple docstring'''
    if not isinstance(_lowerCamelCase , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(_lowerCamelCase )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution( percent = 99 ) -> int:
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
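# Example (added for illustration): 155349 is bouncy because its digits neither
# only increase nor only decrease, while 134468 and 66420 are not. Per Project
# Euler 112, solution(50) returns 538, the first point where half of all
# integers up to it are bouncy.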
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 46
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class snake_case ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("lowercase" , do_lower_case ) != do_lower_case
            or pre_tok_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop("type" ) )
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )
        self.do_lower_case = do_lower_case
def __getstate__( self ):
"""simple docstring"""
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
return state
def __setstate__( self , UpperCamelCase ):
"""simple docstring"""
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def snake_case ( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def snake_case ( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
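    # Example (added for illustration): for a single 3-token sequence the method
    # above returns [0, 0, 0, 0, 0] (CLS + 3 tokens + SEP are all segment 0);
    # with a second sequence, its tokens and trailing SEP are marked 1.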
    def snake_case ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def snake_case ( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        """simple docstring"""
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 675
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__: Optional[int] = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A__['''modeling_tapas'''] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A__['''modeling_tf_tapas'''] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A__, module_spec=__spec__)
| 719
|
from __future__ import annotations
def lowerCAmelCase_ ( voltage ,current ,resistance):
if (voltage, current, resistance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if resistance < 0:
raise ValueError("Resistance cannot be negative")
if voltage == 0:
return {"voltage": float(current * resistance)}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0")
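# Example (added for illustration): exactly one argument must be 0; with
# voltage=10 and current=5 the function solves for resistance and returns
# {"resistance": 2.0}.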
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221
| 0
|
'''simple docstring'''
from __future__ import annotations
def encode (plain :str ):
    return [ord(elem ) - 96 for elem in plain]
def decode (encoded :list[int] ):
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ():
    enc = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , enc )
    print("""Decoded:""" , decode(enc ) )
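# Example (added for illustration): encode maps letters to their 1-based
# alphabet positions, so encode("abc") == [1, 2, 3] and
# decode([8, 5, 12, 12, 15]) == "hello".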
if __name__ == "__main__":
main()
| 5
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
_lowercase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_lowercase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
_lowercase = BeautifulSoup(res.text, """html.parser""")
_lowercase = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 5
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ) -> List[str]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ) -> List[Any]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Tuple:
        '''simple docstring'''
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ) -> Any:
        '''simple docstring'''
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Dict:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def __lowercase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def __lowercase ( self ) -> str:
'''simple docstring'''
pass
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Dense ) )
def __lowercase ( self ) -> str:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __lowercase ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Any:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
'''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest ( unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor( self ) -> Optional[Any]:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def __lowercase ( self ) -> str:
'''simple docstring'''
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 291
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = "align_text_model"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> List[str]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
@classmethod
def __lowercase ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
        config_dict , kwargs = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a__ , **a__ )
class AlignVisionConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = "align_vision_model"
    def __init__( self , num_channels = 3 , image_size = 6_00 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 1_12, 1_92] , out_channels = [16, 24, 40, 80, 1_12, 1_92, 3_20] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 25_60 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.0_01 , batch_norm_momentum = 0.99 , drop_connect_rate = 0.2 , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
@classmethod
def __lowercase ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
        config_dict , kwargs = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a__ , **a__ )
class AlignConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = "align"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=6_40 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ) -> Dict:
        '''simple docstring'''
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
@classmethod
    def __lowercase ( cls , text_config , vision_config , **kwargs ) -> str:
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
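# Example (added for illustration): AlignConfig composes the two sub-configs;
# AlignConfig(text_config={}, vision_config={}) builds both with default values,
# and its to_dict() output nests them under "text_config" and "vision_config".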
| 291
| 1
|
"""simple docstring"""
def _UpperCamelCase ( _A = 1_0_0_0 ) -> int:
"""simple docstring"""
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == _A:
            break
    return index
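# Example (added for illustration): F(12) = 144 is the first Fibonacci number
# with 3 digits, so calling the function with _A=3 returns 12; Project Euler 25
# uses the default of 1000 digits.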
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 555
|
"""simple docstring"""
def _UpperCamelCase ( _A = 1_0_0_0 ) -> int:
"""simple docstring"""
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == _A:
            break
    return index
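# Example (added for illustration): F(12) = 144 is the first Fibonacci number
# with 3 digits, so calling the function with _A=3 returns 12; Project Euler 25
# uses the default of 1000 digits.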
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 555
| 1
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
_lowerCamelCase : List[Any] = parser.parse_args()
_lowerCamelCase : Optional[Any] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 516
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
_lowerCamelCase : List[Any] = parser.parse_args()
_lowerCamelCase : Optional[Any] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 516
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__)
def create_rename_keys( config ,has_lm_head=False ,is_semantic=False ):
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", """beit.embeddings.cls_token"""),
(f"""{prefix}patch_embed.proj.weight""", """beit.embeddings.patch_embeddings.projection.weight"""),
(f"""{prefix}patch_embed.proj.bias""", """beit.embeddings.patch_embeddings.projection.bias"""),
(f"""{prefix}pos_embed""", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
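# Illustrative note (added): with is_semantic=False the prefix is empty, so the
# first pair produced for layer 0 is
# ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight").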
def read_in_q_k_v( state_dict ,config ,has_lm_head=False ,is_semantic=False ):
    for i in range(config.num_hidden_layers ):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key( dct ,old ,new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url ,pytorch_dump_folder_path ,push_to_hub=False ):
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True ,use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="""cpu""" )["""model"""]
    rename_keys = create_rename_keys(config ,has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size ,resample=PILImageResampling.BILINEAR ,do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image ,return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path ,model_name ) ,organization="""nielsr""" ,commit_message="""Add image processor""" ,use_temp_dir=True ,)
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path ,model_name ) ,organization="""nielsr""" ,commit_message="""Add model""" ,use_temp_dir=True ,)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int =argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
__SCREAMING_SNAKE_CASE : Any =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 428
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class A_ :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( *snake_case__ : str , **snake_case__ : Dict ):
pass
def hashimage ( lowerCAmelCase__ ):
    lowercase = hashlib.md5(lowerCAmelCase__.tobytes() )
    return lowercase.hexdigest()[:10]
def mask_to_test_readable ( lowerCAmelCase__ ):
    lowercase = np.array(lowerCAmelCase__ )
    lowercase_shape = lowercase.shape
    return {"hash": hashimage(lowerCAmelCase__ ), "shape": lowercase_shape}
@is_pipeline_test
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
_A :Optional[int] = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_A :str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , model : Optional[Any] , tokenizer : Optional[Any] , processor : List[str] ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        image_segmenter = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_56 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9_967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9_909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9_879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9_834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9_716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9_612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9_599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9_552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9_532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9_483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9_464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9_408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9_335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9_326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9_262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8_999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8_986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8_984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8_873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowercase = """facebook/sam-vit-huge"""
lowercase = pipeline("""mask-generation""" , model=snake_case__ )
lowercase = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
lowercase = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(snake_case__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0_210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_053},
] , )
| 428
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __A ( unittest.TestCase ):
UpperCAmelCase__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : List[Any] , __snake_case : List[str] ) -> Any:
__magic_name__: Optional[Any] = TextaTextGenerationPipeline(model=__snake_case , tokenizer=__snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase__ ( self : int , __snake_case : List[str] , __snake_case : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = generator("""Something there""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
__magic_name__: List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
__magic_name__: List[Any] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
with self.assertRaises(__snake_case ):
generator(4 )
@require_torch
def lowerCamelCase__ ( self : Any ) -> Dict:
__magic_name__: List[str] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
__magic_name__: Any = generator("""Something there""" , do_sample=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": """"""}] )
__magic_name__: Optional[Any] = 3
__magic_name__: Optional[Any] = generator(
"""Something there""" , num_return_sequences=__snake_case , num_beams=__snake_case , )
__magic_name__: Union[str, Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(__snake_case , __snake_case )
__magic_name__: Optional[int] = generator("""This is a test""" , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
__magic_name__: Optional[int] = generator.model.config.eos_token_id
__magic_name__: int = """<pad>"""
__magic_name__: Tuple = generator(
["""This is a test""", """This is a second test"""] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
__magic_name__: Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
__magic_name__: List[str] = generator("""Something there""" , do_sample=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": """"""}] )
| 717
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowerCamelCase = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source : Optional[Any] , target : int ) -> bool:
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def a ( __UpperCAmelCase : int ) -> Tuple:
    args = _TestCommandArgs(dataset=__UpperCAmelCase , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    readme_path = os.path.join(__UpperCAmelCase , """README.md""" )
    assert os.path.exists(readme_path )
    dataset_infos = DatasetInfosDict.from_directory(__UpperCAmelCase )
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 213
| 0
|
def lowerCamelCase_ ( grid ) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row ( current_row , row_above ) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
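# Minimal usage sketch (hypothetical grid): the function mutates the grid in
# place and returns the cheapest top-left to bottom-right path cost, moving only
# right or down, e.g. lowerCamelCase_([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7.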
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : Tuple = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = '''time_series_transformer'''
_snake_case = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "student_t" , lowerCamelCase__ = "nll" , lowerCamelCase__ = 1 , lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase__ = "mean" , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 3_2 , lowerCamelCase__ = 3_2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = True , lowerCamelCase__ = "gelu" , lowerCamelCase__ = 6_4 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1_0_0 , lowerCamelCase__ = 0.02 , lowerCamelCase__=True , **lowerCamelCase__ , ):
'''simple docstring'''
UpperCamelCase = prediction_length
UpperCamelCase = context_length or prediction_length
UpperCamelCase = distribution_output
UpperCamelCase = loss
UpperCamelCase = input_size
UpperCamelCase = num_time_features
UpperCamelCase = lags_sequence
UpperCamelCase = scaling
UpperCamelCase = num_dynamic_real_features
UpperCamelCase = num_static_real_features
UpperCamelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
UpperCamelCase = cardinality
else:
UpperCamelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
UpperCamelCase = embedding_dimension
else:
UpperCamelCase = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase = input_size * len(lowerCamelCase__ ) + self._number_of_features
UpperCamelCase = d_model
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_attention_heads
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = decoder_layers
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = use_cache
super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ )
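        # With the values above, the transformer input width is
        # input_size * len(lags_sequence) + self._number_of_features: one lagged
        # copy of the target per lag, plus the time, static, and scale features
        # counted in the property below.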
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 212
| 0
|
from collections import deque
from .hash_table import HashTable
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
    def __init__( self : Optional[int] , *args : Optional[int] , **kwargs : str ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
    def _a ( self : List[str] , key : str , data : List[str] ):
        """simple docstring"""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
def _a ( self : Dict ):
"""simple docstring"""
return (
sum(self.charge_factor - len(snake_case__ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
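    # The method above averages the spare capacity (charge_factor minus the
    # current chain length) over all slots, scaled by charge_factor; a small
    # value means chains are nearly full and the table should grow.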
    def _a ( self : Dict , key : Dict , data : Union[str, Any]=None ):
        """simple docstring"""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
| 689
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
    def __init__( self : Tuple , vocab : int , unk_token : int="<unk>" , max_input_chars_per_word : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
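# Greedy longest-match-first: the inner loop shrinks `end` until chars[start:end]
# is a vocab entry, emits it, and resumes from `end`; a character with no match
# at all becomes unk_token. E.g. with a hypothetical vocab containing "un",
# "drink", and "able", "undrinkable" tokenizes to ["un", "drink", "able"].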
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
    def __init__( self : Dict , vocab_file : Union[str, Any] , bod_token : Any="<d>" , eod_token : Optional[int]="</d>" , bos_token : Optional[int]="<s>" , eos_token : Union[str, Any]="</s>" , pad_token : List[str]="<pad>" , unk_token : Any="<unk>" , line_token : List[str]="</n>" , space_token : Any="</_>" , padding_side : List[str]="left" , **snake_case__ : Optional[int] , ):
        """simple docstring"""
        requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **snake_case__ , )
        A =bod_token
        A =eod_token
        A =load_vocab(vocab_file )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def _a ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(snake_case__ ):
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
| 689
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase = 250_004
_lowercase = 250_020
@require_sentencepiece
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
_UpperCAmelCase = MBartTokenizer
_UpperCAmelCase = MBartTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def UpperCamelCase ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = MBartTokenizer(A__ , keep_accents=A__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ) -> int:
snake_case = MBartTokenizer(A__ , keep_accents=A__ )
snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case = tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(
A__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case = tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase ( self ) -> Dict:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
snake_case = self.tokenizer_class.from_pretrained(A__ , **A__ )
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(A__ )
snake_case = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(A__ )
snake_case = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=True
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
snake_case = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(A__ )
snake_case = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=False
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
snake_case = tokenizer_p.save_pretrained(A__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(A__ )
snake_case = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
_UpperCAmelCase = '''facebook/mbart-large-en-ro'''
_UpperCAmelCase = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
_UpperCAmelCase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
_UpperCAmelCase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def UpperCamelCase ( cls ) -> Optional[Any]:
snake_case = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
snake_case = 1
return cls
def UpperCamelCase ( self ) -> List[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertIn(A__ , self.tokenizer.all_special_ids )
snake_case = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
snake_case = self.tokenizer.decode(A__ , skip_special_tokens=A__ )
snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A__ )
self.assertEqual(A__ , A__ )
self.assertNotIn(self.tokenizer.eos_token , A__ )
def UpperCamelCase ( self ) -> Tuple:
snake_case = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , A__ )
snake_case = 10
snake_case = self.tokenizer(A__ , max_length=A__ , truncation=A__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A__ )
self.assertEqual(len(A__ ) , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] )
def UpperCamelCase ( self ) -> Dict:
snake_case = tempfile.mkdtemp()
snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A__ )
snake_case = MBartTokenizer.from_pretrained(A__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A__ )
@require_torch
def UpperCamelCase ( self ) -> Dict:
snake_case = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A__ , return_tensors='''pt''' )
snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase ( self ) -> List[Any]:
snake_case = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(A__ , A__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase ( self ) -> Dict:
snake_case = self.tokenizer(self.src_text , padding=A__ , truncation=A__ , max_length=3 , return_tensors='''pt''' )
snake_case = self.tokenizer(
text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=10 , return_tensors='''pt''' )
snake_case = targets['''input_ids''']
snake_case = shift_tokens_right(A__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(A__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 30_34, 2, 25_00_04]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
| 342
|
'''simple docstring'''
_lowercase = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_58_18,
}
def __UpperCamelCase ( a : str , a : str , a : float ) ->float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
snake_case = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {', '.join(a )}"""
)
raise ValueError(a )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
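# Conversion pivots through joule: the value is multiplied by the source unit's
# joule factor and divided by the target's, e.g. converting 1 kilowatthour to
# joule gives 1 * 3_600_000 / 1.0 == 3_600_000.0.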
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCAmelCase : int ={
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] =[
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693
|
import torch
from diffusers import DiffusionPipeline
class _a ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self ) -> torch.Tensor:
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(noise , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , noise ).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
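# Usage sketch (hypothetical repo id): custom pipelines like the class above are
# typically loaded via DiffusionPipeline.from_pretrained("some/repo",
# custom_pipeline="..."), then invoked with no arguments; this one runs a single
# denoising step and always returns an all-ones tensor.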
| 693
| 1
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : int = (KDPMaDiscreteScheduler,)
__UpperCAmelCase : Optional[int] = 10
def _UpperCamelCase ( self , **a_ ):
lowerCamelCase_ : str = {
"num_train_timesteps": 1100,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a_ )
return config
def _UpperCamelCase ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def _UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=a_ , beta_end=a_ )
def _UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a_ )
def _UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.scheduler_classes[0]
lowerCamelCase_ : Optional[int] = self.get_scheduler_config(prediction_type="v_prediction" )
lowerCamelCase_ : List[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : List[Any] = self.dummy_model()
lowerCamelCase_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : List[Any] = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : Optional[Any] = scheduler.scale_model_input(a_ , a_ )
lowerCamelCase_ : int = model(a_ , a_ )
lowerCamelCase_ : Optional[int] = scheduler.step(a_ , a_ , a_ )
lowerCamelCase_ : Optional[Any] = output.prev_sample
lowerCamelCase_ : Union[str, Any] = torch.sum(torch.abs(a_ ) )
lowerCamelCase_ : str = torch.mean(torch.abs(a_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.00_02 ) < 1E-3
def _UpperCamelCase ( self ):
if torch_device == "mps":
return
lowerCamelCase_ : int = self.scheduler_classes[0]
lowerCamelCase_ : List[str] = self.get_scheduler_config()
lowerCamelCase_ : Optional[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : Any = self.dummy_model()
lowerCamelCase_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : List[Any] = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : Optional[Any] = scheduler.scale_model_input(a_ , a_ )
lowerCamelCase_ : Optional[int] = model(a_ , a_ )
lowerCamelCase_ : Tuple = scheduler.step(a_ , a_ , a_ )
lowerCamelCase_ : Dict = output.prev_sample
lowerCamelCase_ : Dict = torch.sum(torch.abs(a_ ) )
lowerCamelCase_ : int = torch.mean(torch.abs(a_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
def _UpperCamelCase ( self ):
if torch_device == "mps":
return
lowerCamelCase_ : Tuple = self.scheduler_classes[0]
lowerCamelCase_ : int = self.get_scheduler_config()
lowerCamelCase_ : int = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps , device=a_ )
lowerCamelCase_ : Any = self.dummy_model()
lowerCamelCase_ : Any = self.dummy_sample_deter.to(a_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase_ : Tuple = scheduler.scale_model_input(a_ , a_ )
lowerCamelCase_ : Union[str, Any] = model(a_ , a_ )
lowerCamelCase_ : List[Any] = scheduler.step(a_ , a_ , a_ )
lowerCamelCase_ : Dict = output.prev_sample
lowerCamelCase_ : str = torch.sum(torch.abs(a_ ) )
lowerCamelCase_ : List[str] = torch.mean(torch.abs(a_ ) )
if str(a_ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
| 250
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self.dummy_uncond_unet
lowerCamelCase_ : Union[str, Any] = KarrasVeScheduler()
lowerCamelCase_ : Any = KarrasVePipeline(unet=a_ , scheduler=a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : List[str] = pipe(num_inference_steps=2 , generator=a_ , output_type="numpy" ).images
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = pipe(num_inference_steps=2 , generator=a_ , output_type="numpy" , return_dict=a_ )[0]
lowerCamelCase_ : Tuple = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = "google/ncsnpp-celebahq-256"
lowerCamelCase_ : Optional[int] = UNetaDModel.from_pretrained(a_ )
lowerCamelCase_ : Any = KarrasVeScheduler()
lowerCamelCase_ : Dict = KarrasVePipeline(unet=a_ , scheduler=a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=a_ , output_type="numpy" ).images
lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 250
| 1
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowercase : List[Any] = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 2048-bit
1_4: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 3072-bit
1_5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 4096-bit
1_6: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 6144-bit
1_7: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 8192-bit
1_8: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
}
class a__ :
def __init__( self : str , A_ : int = 14 ) -> None:
"""simple docstring"""
if group not in primes:
raise ValueError("""Unsupported Group""" )
lowerCamelCase_: str = primes[group]["""prime"""]
lowerCamelCase_: Dict = primes[group]["""generator"""]
lowerCamelCase_: Union[str, Any] = int(hexlify(urandom(32 ) ) , base=16 )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return hex(self.__private_key )[2:]
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_: Any = pow(self.generator , self.__private_key , self.prime )
return hex(A_ )[2:]
def lowerCAmelCase ( self : Any , A_ : int ) -> bool:
"""simple docstring"""
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(A_ , (self.prime - 1) // 2 , self.prime ) == 1
)
def lowerCAmelCase ( self : Tuple , A_ : str ) -> str:
"""simple docstring"""
lowerCamelCase_: int = int(A_ , base=16 )
if not self.is_valid_public_key(A_ ):
raise ValueError("""Invalid public key""" )
lowerCamelCase_: List[Any] = pow(A_ , self.__private_key , self.prime )
        return sha256(str(A_ ).encode() ).hexdigest()
@staticmethod
def lowerCAmelCase ( A_ : int , A_ : int ) -> bool:
"""simple docstring"""
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(A_ , (prime - 1) // 2 , A_ ) == 1
)
@staticmethod
def lowerCAmelCase ( A_ : str , A_ : str , A_ : int = 14 ) -> str:
"""simple docstring"""
lowerCamelCase_: List[str] = int(A_ , base=16 )
lowerCamelCase_: Any = int(A_ , base=16 )
lowerCamelCase_: Optional[int] = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(A_ , A_ ):
raise ValueError("""Invalid public key""" )
lowerCamelCase_: List[str] = pow(A_ , A_ , A_ )
        return sha256(str(A_ ).encode() ).hexdigest()
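# Protocol sketch: each party keeps a random private exponent x, publishes
# g^x mod p, and derives the shared secret as (peer_public)^x mod p; both sides
# then hash that shared integer (sha256 above) to obtain a symmetric key.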
if __name__ == "__main__":
import doctest
doctest.testmod()
| 584
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = DistilBertTokenizer
_A = DistilBertTokenizerFast
_A = True
@slow
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_: Any = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
lowerCamelCase_: str = tokenizer.encode("""sequence builders""" , add_special_tokens=A_ )
lowerCamelCase_: List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A_ )
lowerCamelCase_: int = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
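        # Expected BERT-style layout: [CLS] seq [SEP] for a single sequence and
        # [CLS] seq [SEP] seq2 [SEP] for a pair, which the asserts below check
        # against the raw token ids.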
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 584
| 1
|
"""simple docstring"""
from math import sqrt
def A_ ( snake_case__ = 1_00_00_00 ) -> int:
_UpperCamelCase :int = 0
_UpperCamelCase :int = 0
_UpperCamelCase :int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(snake_case__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
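# Project Euler 86: the loop counts axis-aligned cuboids whose shortest surface
# path sqrt((a + b)**2 + c**2) is an integer, with a + b = sum_shortest_sides and
# c = max_cuboid_size, and returns the least maximum side for which the count
# exceeds `limit`.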
if __name__ == "__main__":
print(F"""{solution() = }""")
| 355
|
"""simple docstring"""
def A_ ( snake_case__ , snake_case__ = " " ) -> list:
_UpperCamelCase :List[str] = []
_UpperCamelCase :int = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
_UpperCamelCase :Dict = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
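# Usage sketch: with the default separator, "Hello world" -> ["Hello", "world"];
# unlike str.split() with no argument, consecutive separators here produce empty
# strings in the result.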
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355
| 1
|
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10)]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1) for i in range(10)]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1)]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4), range(4 , 7), range(7 , 10)]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1), range(1 , 2), range(2 , 3)]),
] , )
def lowercase ( lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]):
"""simple docstring"""
_A : Optional[Any] = _distribute_shards(**lowerCAmelCase)
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """simple docstring"""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """simple docstring"""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
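# Minimal sketch of the even-shard distribution behaviour exercised above,
# reimplemented for illustration (this is not the datasets-internal code):
# jobs receive contiguous ranges whose sizes differ by at most one shard.
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    base, extra = divmod(num_shards, num_jobs)
    ranges, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)
        ranges.append(range(start, start + size))
        start += size
    return ranges

assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]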
| 417
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_convnext'''] = ['''ConvNextFeatureExtractor''']
    _import_structure['''image_processing_convnext'''] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convnext'''] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convnext'''] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
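# Hedged sketch of the lazy-import idea behind _LazyModule, reduced to the
# PEP 562 module-level __getattr__ hook; this standalone version is purely
# illustrative and is not transformers' actual implementation.
import importlib

_LAZY_SUBMODULES = {"json": "json", "math": "math"}  # attribute name -> module path

def __getattr__(name):
    if name in _LAZY_SUBMODULES:
        return importlib.import_module(_LAZY_SUBMODULES[name])
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")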
| 417
| 1
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__(self, parent):
        '''simple docstring'''
        self.parent = parent

    def prepare_feat_extract_dict(self):
        '''simple docstring'''
        return {}
def get_html_strings() -> list:
'''simple docstring'''
_A = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
_A = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
@require_bs4
class MarkupLMFeatureExtractionTest ( FeatureExtractionSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        '''simple docstring'''
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        '''simple docstring'''
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
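# Hedged sketch of the kind of extraction the feature extractor performs with
# BeautifulSoup: collect every non-empty text node plus a rough ancestor path.
# Illustrative only; MarkupLM's real xpath construction also tracks siblings.
def _extract_nodes(html_string):
    from bs4 import BeautifulSoup  # requires the bs4 package

    soup = BeautifulSoup(html_string, "html.parser")
    nodes, paths = [], []
    for text in soup.find_all(string=True):
        stripped = text.strip()
        if not stripped:
            continue
        nodes.append(stripped)
        path_parts = [p.name for p in reversed(text.find_parents()) if p.name != "[document]"]
        paths.append("/" + "/".join(path_parts))
    return nodes, paths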
| 330
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset ( Dataset ):
"""simple docstring"""
    def __init__(self, dataset, process, params):
        '''simple docstring'''
        self.dataset = dataset
        self.process = process
        self.params = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
    def __getitem__(self, i):
        '''simple docstring'''
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator ( IterableDataset ):
"""simple docstring"""
    def __init__(self, loader, infer, params, loader_batch_size=None):
        '''simple docstring'''
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
def __len__( self : Dict ):
'''simple docstring'''
return len(self.loader )
    def __iter__(self):
        '''simple docstring'''
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        '''simple docstring'''
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        '''simple docstring'''
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator ( PipelineIterator ):
"""simple docstring"""
    def __init__(self, loader, infer, params, loader_batch_size=None):
        '''simple docstring'''
        super().__init__(loader, infer, params)
    def __iter__(self):
        '''simple docstring'''
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self
    def __next__(self):
        '''simple docstring'''
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator ( PipelineIterator ):
"""simple docstring"""
    def __iter__(self):
        '''simple docstring'''
        self.iterator = iter(self.loader)
        return self
    def __next__(self):
        '''simple docstring'''
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset ( Dataset ):
"""simple docstring"""
    def __init__(self, dataset: Dataset, key: str):
        '''simple docstring'''
        self.dataset = dataset
        self.key = key

    def __len__(self):
        '''simple docstring'''
        return len(self.dataset)

    def __getitem__(self, i):
        '''simple docstring'''
        return self.dataset[i][self.key]
class KeyPairDataset ( Dataset ):
"""simple docstring"""
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        '''simple docstring'''
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        '''simple docstring'''
        return len(self.dataset)

    def __getitem__(self, i):
        '''simple docstring'''
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
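# Hedged sketch of the batch-unrolling idea behind PipelineIterator above:
# a model runs on batches, but consumers want one item at a time, so batched
# outputs are re-split into per-example slices. Pure Python for illustration.
def _unroll_batches(batches):
    for batch in batches:  # each batch: dict of equal-length sequences
        batch_size = len(next(iter(batch.values())))
        for i in range(batch_size):
            yield {key: value[i] for key, value in batch.items()}

_example = [{"logits": [[0.1], [0.9]], "ids": [1, 2]}]
assert list(_unroll_batches(_example)) == [
    {"logits": [0.1], "ids": 1},
    {"logits": [0.9], "ids": 2},
]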
| 330
| 1
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"""comet"""}
_has_fairseq = importlib.util.find_spec("""fairseq""") is not None
UNSUPPORTED_ON_WINDOWS = {"""code_eval"""}
_on_windows = os.name == """nt"""
REQUIRE_TRANSFORMERS = {"""bertscore""", """frugalscore""", """perplexity"""}
_has_transformers = importlib.util.find_spec("""transformers""") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("""\"test requires Fairseq\"""")
        else:
            test_case(self, metric_name)
    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("""\"test requires transformers\"""")
        else:
            test_case(self, metric_name)
    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("""\"test not supported on Windows\"""")
        else:
            test_case(self, metric_name)
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("""./metrics/*/""")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest ( parameterized.TestCase ):
"""simple docstring"""
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""")
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""")
    def test_load_metric(self, metric_name):
        '''simple docstring'''
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""", metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
self.assertGreater(results.attempted , 1)
@slow
    def test_load_real_metric(self, metric_name):
        '''simple docstring'''
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""", metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
@contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        '''simple docstring'''
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
@contextmanager
    def use_local_metrics(self):
        '''simple docstring'''
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("""metrics""", metric_name), *args, **kwargs)
        with patch("""datasets.load_metric""") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
@classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        '''simple docstring'''
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("""sv""", """""", """""")  # handle pytest cli flags
    class MockedPredictor ( Predictor ):
        """simple docstring"""
        def predict(self, input_dict):
            '''simple docstring'''
            assert len(input_dict["""input_ids"""]) == 2
            return np.array([1.03, 1.04])
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("""bleurt.score._create_predictor""") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def patch_bertscore(module_name):
    import torch
    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("""bert_score.scorer.get_model"""), patch(
        """bert_score.scorer.bert_cos_score_idf""") as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            """simple docstring"""
            def predict(self, data, *args, **kwargs):
                '''simple docstring'''
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)
        return Model()
    # mock load_from_checkpoint which is supposed to download a comet model
    with patch("""comet.download_model""") as mock_download_model:
        mock_download_model.return_value = None
        with patch("""comet.load_from_checkpoint""") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("""metrics""", """seqeval"""))
    wrong_scheme = "ERROR"
    error_message = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
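# Hedged sketch of the patching strategy used throughout this file: expensive
# calls are swapped for cheap fakes via unittest.mock.patch so tests can run
# offline. The patched target here (os.cpu_count) is just a stand-in example.
def _patched_call_demo():
    with patch("os.cpu_count") as mock_cpu_count:
        mock_cpu_count.return_value = 1
        return os.cpu_count()

assert _patched_call_demo() == 1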
| 717
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a__ = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments ( BenchmarkArguments ):
    """simple docstring"""
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__(self, **kwargs):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''')
        self.torchscript = kwargs.pop("""torchscript""", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={'''help''': '''Trace the models using torchscript'''})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''})
    fp16_opt_level: str = field(
default='''O1''' , metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
} , )
@cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        '''simple docstring'''
        requires_backends(self, ["""torch"""])
        logger.info("""PyTorch: setting up devices""")
        if not self.cuda:
            device = torch.device("""cpu""")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
@property
    def is_tpu(self):
        '''simple docstring'''
        return is_torch_tpu_available() and self.tpu
@property
    def device_idx(self) -> int:
        '''simple docstring'''
        requires_backends(self, ["""torch"""])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
@property
    def device(self) -> "torch.device":
        '''simple docstring'''
        requires_backends(self, ["""torch"""])
        return self._setup_devices[0]
@property
    def n_gpu(self):
        '''simple docstring'''
        requires_backends(self, ["""torch"""])
        return self._setup_devices[1]
@property
    def is_gpu(self):
        '''simple docstring'''
        return self.n_gpu > 0
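# Hedged sketch of the deprecated-flag translation done in __init__ above:
# a legacy "no_<flag>" kwarg is popped and stored as the negated positive flag.
def _translate_deprecated(kwargs: dict, deprecated_args: list) -> dict:
    translated = dict(kwargs)
    for deprecated_arg in deprecated_args:
        if deprecated_arg in translated:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            translated[positive_arg] = not translated.pop(deprecated_arg)
    return translated

assert _translate_deprecated({"no_cuda": True}, ["no_cuda"]) == {"cuda": False}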
| 99
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 44
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """simple docstring"""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.')
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
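# Hedged sketch of the prefix-renaming step performed by get_new_dict above:
# every checkpoint key is rewritten through an ordered list of (old, new)
# substring pairs before being loaded into the HF model. Keys here are made up.
def _rename_keys(state_dict, pairs):
    renamed = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in pairs:
            new_key = new_key.replace(old, new)
        renamed[new_key] = value
    return renamed

assert _rename_keys({"bert.bert.layer.0": 1}, [("bert.bert", "visual_bert")]) == {
    "visual_bert.layer.0": 1
}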
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F"""{solution() = }""")
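# Brute-force cross-check of the closed-form count above for a small tile
# budget: a square lamina with outer side o and hole side h (same parity,
# 0 < h <= o - 2) uses o**2 - h**2 tiles.
def _count_laminae_bruteforce(limit: int) -> int:
    count = 0
    for outer in range(3, limit):
        for hole in range(outer - 2, 0, -2):
            if outer**2 - hole**2 <= limit:
                count += 1
    return count

assert _count_laminae_bruteforce(100) == solution(100)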
| 700
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze(module) -> None:
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_image(image) -> None:
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''')
    return timestamp
| 470
| 0
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset ( IterableDataset ):
    """simple docstring"""
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
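# Hedged sketch of the perplexity computation used in evaluate() above:
# perplexity is exp(mean cross-entropy loss); torch.exp saturates to inf
# rather than raising, so the OverflowError guard above is belt-and-braces.
def _perplexity_from_losses(loss_values):
    mean_loss = torch.mean(torch.tensor(loss_values))
    return mean_loss.item(), torch.exp(mean_loss).item()

# e.g. _perplexity_from_losses([0.0, 0.0]) == (0.0, 1.0)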
| 396
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = IFInpaintingSuperResolutionPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def snake_case ( self: Optional[Any] ):
return self._get_superresolution_dummy_components()
def snake_case ( self: List[str] ,a: str ,a: Tuple=0 ):
if str(a ).startswith('mps' ):
__UpperCAmelCase = torch.manual_seed(a )
else:
__UpperCAmelCase = torch.Generator(device=a ).manual_seed(a )
__UpperCAmelCase = floats_tensor((1, 3, 16, 16) ,rng=random.Random(a ) ).to(a )
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) ,rng=random.Random(a ) ).to(a )
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) ,rng=random.Random(a ) ).to(a )
__UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def snake_case ( self: Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' ,reason='float16 requires CUDA' )
def snake_case ( self: Any ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case ( self: Tuple ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case ( self: List[Any] ):
self._test_save_load_local()
def snake_case ( self: Tuple ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
| 396
| 1
|
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(F'''{key}\n{value}\n''')
| 702
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
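# Quick sanity check of the frequency table: across all outcomes of rolling
# two 4-sided dice, the frequencies must sum to 4**2 and peak at a total of 5.
_freqs = total_frequency_distribution(sides_number=4, dice_number=2)
assert sum(_freqs) == 4**2
assert max(_freqs) == _freqs[5] == 4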
| 234
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig ( PretrainedConfig ):
    model_type = "cvt"
    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 1_92, 3_84], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 204
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__lowerCamelCase = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str , snake_case__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = 0.0
for i, j in zip(snake_case__ , snake_case__ ):
n_correct += 1.0 if math_equivalence.is_equiv(snake_case__ , snake_case__ ) else 0.0
snake_case : Optional[int] = n_correct / len(snake_case__ )
return {
"accuracy": accuracy,
}
| 204
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
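# Hedged sketch of the subcommand pattern the CLI above relies on: each
# command registers a subparser and attaches a callable via set_defaults(),
# which main() later invokes. The "greet" command here is made up.
def _demo_cli(argv):
    parser = ArgumentParser("demo-cli")
    subparsers = parser.add_subparsers()
    greet = subparsers.add_parser("greet")
    greet.add_argument("--name", default="world")
    greet.set_defaults(func=lambda args: f"hello {args.name}")
    args = parser.parse_args(argv)
    return args.func(args)

assert _demo_cli(["greet", "--name", "datasets"]) == "hello datasets"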
| 712
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( lowercase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = ReformerTokenizer
SCREAMING_SNAKE_CASE__ = ReformerTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_different_model_input_name(self):
        '''simple docstring'''
        pass
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
'''simple docstring'''
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
    def test_tokenization_base_easy_symbols(self):
        '''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        '''simple docstring'''
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def __A ( self : Tuple ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        sequence = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(sequence)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences, )
| 268
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''levit'''
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
| 465
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 314
| 0
|
'''simple docstring'''
# Imports
import numpy as np
class __snake_case :
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase_ ( self, A=0.08, A=1.22, A=0.03 ):
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : str = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase_ ( self, A=0.16 ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase_ ( self, A=0.5 ):
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def UpperCAmelCase_ ( self, A=None, A=None ):
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase : Tuple = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
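# --- Added illustrative example (not part of the original class) ---
# The class above is heavily obfuscated; this is a minimal, runnable sketch of
# the core NDVI computation it implements, using made-up NumPy reflectance values.
import numpy as np

demo_red = np.array([0.10, 0.20, 0.30])   # red-band reflectance (illustrative)
demo_nir = np.array([0.60, 0.50, 0.40])   # near-infrared reflectance (illustrative)

demo_ndvi = (demo_nir - demo_red) / (demo_nir + demo_red)  # Normalized Difference Vegetation Index
print(demo_ndvi)  # values near 1 indicate healthy vegetation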
| 449
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase__ : str):
lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'])
lowerCamelCase : Dict = MaskFormerConfig(backbone_config=UpperCAmelCase__)
lowerCamelCase : Dict = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
lowerCamelCase : Union[str, Any] = 8_47
lowerCamelCase : Any = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
lowerCamelCase : str = 1_50
lowerCamelCase : Union[str, Any] = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
lowerCamelCase : int = 1_71
lowerCamelCase : List[Any] = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
lowerCamelCase : int = 1_33
lowerCamelCase : List[str] = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
lowerCamelCase : str = 19
lowerCamelCase : Union[str, Any] = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
lowerCamelCase : Union[str, Any] = 65
lowerCamelCase : List[Any] = 'mapillary-vistas-id2label.json'
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='dataset') , 'r'))
lowerCamelCase : str = {int(UpperCAmelCase__): v for k, v in idalabel.items()}
return config
def UpperCAmelCase ( UpperCAmelCase__ : List[str]):
lowerCamelCase : List[Any] = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight'))
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias'))
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight'))
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias'))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'''))
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight'''))
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight'''))
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias'''))
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight'''))
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias'''))
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias'))
for source_index, target_index in zip(range(3 , 0 , -1) , range(0 , 3)):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight'''))
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight'''))
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias'''))
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight'))
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias'))
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias'''))
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias'''))
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias'''))
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias'''))
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias'''))
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias'''))
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias'''))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight'))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias'))
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias'))
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight'))
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias'))
for i in range(3):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias'''))
# fmt: on
return rename_keys
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]):
lowerCamelCase : List[str] = dct.pop(UpperCAmelCase__)
lowerCamelCase : Tuple = val
def UpperCAmelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any]):
lowerCamelCase : Union[str, Any] = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
for i in range(len(backbone_config.depths)):
lowerCamelCase : str = num_features[i]
for j in range(backbone_config.depths[i]):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase : Union[str, Any] = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''')
lowerCamelCase : int = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Dict = in_proj_weight[:dim, :]
lowerCamelCase : Dict = in_proj_bias[: dim]
lowerCamelCase : List[Any] = in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase : Dict = in_proj_bias[
dim : dim * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-dim :, :
]
lowerCamelCase : int = in_proj_bias[-dim :]
# fmt: on
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]):
# fmt: off
lowerCamelCase : Tuple = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase : Optional[Any] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''')
lowerCamelCase : List[Any] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''')
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[str] = in_proj_weight[: hidden_size, :]
lowerCamelCase : str = in_proj_bias[:config.hidden_size]
lowerCamelCase : int = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase : List[Any] = in_proj_weight[-hidden_size :, :]
lowerCamelCase : Optional[int] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase : Tuple = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''')
lowerCamelCase : List[str] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''')
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Dict = in_proj_weight[: hidden_size, :]
lowerCamelCase : Any = in_proj_bias[:config.hidden_size]
lowerCamelCase : List[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase : Union[str, Any] = in_proj_weight[-hidden_size :, :]
lowerCamelCase : List[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def UpperCAmelCase ( ):
lowerCamelCase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase : Tuple = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__).raw)
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False):
lowerCamelCase : Optional[Any] = get_maskformer_config(UpperCAmelCase__)
# load original state_dict
with open(UpperCAmelCase__ , 'rb') as f:
lowerCamelCase : Tuple = pickle.load(UpperCAmelCase__)
lowerCamelCase : str = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowerCamelCase : Optional[Any] = create_rename_keys(UpperCAmelCase__)
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
read_in_swin_q_k_v(UpperCAmelCase__ , config.backbone_config)
read_in_decoder_q_k_v(UpperCAmelCase__ , UpperCAmelCase__)
# update to torch tensors
for key, value in state_dict.items():
lowerCamelCase : List[str] = torch.from_numpy(UpperCAmelCase__)
# load 🤗 model
lowerCamelCase : str = MaskFormerForInstanceSegmentation(UpperCAmelCase__)
model.eval()
for name, param in model.named_parameters():
print(UpperCAmelCase__ , param.shape)
lowerCamelCase , lowerCamelCase : Optional[Any] = model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__)
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCAmelCase__) == 0, F'''Unexpected keys: {unexpected_keys}'''
# verify results
lowerCamelCase : Union[str, Any] = prepare_img()
if "vistas" in model_name:
lowerCamelCase : Optional[int] = 65
elif "cityscapes" in model_name:
lowerCamelCase : Any = 6_55_35
else:
lowerCamelCase : List[Any] = 2_55
lowerCamelCase : str = True if 'ade' in model_name else False
lowerCamelCase : List[Any] = MaskFormerImageProcessor(ignore_index=UpperCAmelCase__ , reduce_labels=UpperCAmelCase__)
lowerCamelCase : str = image_processor(UpperCAmelCase__ , return_tensors='pt')
lowerCamelCase : Dict = model(**UpperCAmelCase__)
print('Logits:' , outputs.class_queries_logits[0, :3, :3])
if model_name == "maskformer-swin-tiny-ade":
lowerCamelCase : List[str] = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]])
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=1E-4)
print('Looks ok!')
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''')
Path(UpperCAmelCase__).mkdir(exist_ok=UpperCAmelCase__)
model.save_pretrained(UpperCAmelCase__)
image_processor.save_pretrained(UpperCAmelCase__)
if push_to_hub:
print('Pushing model and image processor to the hub...')
model.push_to_hub(F'''nielsr/{model_name}''')
image_processor.push_to_hub(F'''nielsr/{model_name}''')
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
        help='Path to the original pickled state dict (.pkl file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
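# --- Added illustrative example (not part of the original script) ---
# Distilled sketch of the key-renaming pattern used above: each checkpoint key
# is popped and re-inserted under the Hugging Face name. The mapping below is
# a single illustrative pair, not the full table.
import torch

def _rename_key_demo(state_dict, old_key, new_key):
    # Move the tensor stored under old_key to new_key, in place.
    state_dict[new_key] = state_dict.pop(old_key)

_sd = {'backbone.patch_embed.proj.weight': torch.zeros(96, 3, 4, 4)}
_rename_key_demo(
    _sd,
    'backbone.patch_embed.proj.weight',
    'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight',
)
print(list(_sd))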
| 449
| 1
|
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
assert x is not None
assert y is not None
lowerCamelCase__ =len(__lowerCAmelCase )
lowerCamelCase__ =len(__lowerCAmelCase )
# declaring the array for storing the dp values
lowerCamelCase__ =[[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
lowerCamelCase__ =1 if x[i - 1] == y[j - 1] else 0
lowerCamelCase__ =max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
lowerCamelCase__ =""
lowerCamelCase__ , lowerCamelCase__ =m, n
while i > 0 and j > 0:
lowerCamelCase__ =1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
lowerCamelCase__ =x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
a ='AGGTAB'
a ='GXTXAYB'
a =4
a ='GTAB'
a , a =longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
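# --- Added illustrative example (not part of the original snippet) ---
# De-obfuscated sketch of the LCS routine above, assuming the standard
# dynamic-programming formulation implied by its dangling names (m, n, l, seq).
def longest_common_subsequence(x: str, y: str):
    m, n = len(x), len(y)
    l = [[0] * (n + 1) for _ in range(m + 1)]  # l[i][j]: LCS length of x[:i] and y[:j]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    # walk back through the table to recover one subsequence
    seq, i, j = "", m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq

assert longest_common_subsequence("AGGTAB", "GXTXAYB") == (4, "GTAB")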
| 530
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a =logging.get_logger(__name__)
a ={
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __UpperCAmelCase ( __lowerCAmelCase ):
A__ : str = '''deta'''
A__ : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=900 , _lowerCamelCase=2048 , _lowerCamelCase=6 , _lowerCamelCase=2048 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=True , _lowerCamelCase=300 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , **_lowerCamelCase , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCamelCase__ =CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =backbone_config.pop("model_type" )
lowerCamelCase__ =CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ =config_class.from_dict(_lowerCamelCase )
lowerCamelCase__ =backbone_config
lowerCamelCase__ =num_queries
lowerCamelCase__ =max_position_embeddings
lowerCamelCase__ =d_model
lowerCamelCase__ =encoder_ffn_dim
lowerCamelCase__ =encoder_layers
lowerCamelCase__ =encoder_attention_heads
lowerCamelCase__ =decoder_ffn_dim
lowerCamelCase__ =decoder_layers
lowerCamelCase__ =decoder_attention_heads
lowerCamelCase__ =dropout
lowerCamelCase__ =attention_dropout
lowerCamelCase__ =activation_dropout
lowerCamelCase__ =activation_function
lowerCamelCase__ =init_std
lowerCamelCase__ =init_xavier_std
lowerCamelCase__ =encoder_layerdrop
lowerCamelCase__ =auxiliary_loss
lowerCamelCase__ =position_embedding_type
# deformable attributes
lowerCamelCase__ =num_feature_levels
lowerCamelCase__ =encoder_n_points
lowerCamelCase__ =decoder_n_points
lowerCamelCase__ =two_stage
lowerCamelCase__ =two_stage_num_proposals
lowerCamelCase__ =with_box_refine
lowerCamelCase__ =assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowerCamelCase__ =class_cost
lowerCamelCase__ =bbox_cost
lowerCamelCase__ =giou_cost
# Loss coefficients
lowerCamelCase__ =mask_loss_coefficient
lowerCamelCase__ =dice_loss_coefficient
lowerCamelCase__ =bbox_loss_coefficient
lowerCamelCase__ =giou_loss_coefficient
lowerCamelCase__ =eos_coefficient
lowerCamelCase__ =focal_alpha
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self ):
return self.encoder_attention_heads
@property
def _a ( self ):
return self.d_model
def _a ( self ):
lowerCamelCase__ =copy.deepcopy(self.__dict__ )
lowerCamelCase__ =self.backbone_config.to_dict()
lowerCamelCase__ =self.__class__.model_type
return output
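# --- Added illustrative example (not part of the original config) ---
# Minimal sketch of the serialization pattern in the final method above:
# nested sub-configs are expanded into plain dicts so the whole configuration
# becomes JSON-serializable. The demo class names here are invented.
import copy

class _DemoBackboneConfig:
    def __init__(self):
        self.out_features = ["stage2", "stage3", "stage4"]
    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class _DemoConfig:
    model_type = "deta"
    def __init__(self):
        self.backbone_config = _DemoBackboneConfig()
        self.d_model = 256
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

print(_DemoConfig().to_dict())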
| 530
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any =""
__lowerCAmelCase : Optional[Any] =""
__lowerCAmelCase : Tuple =""
__lowerCAmelCase : List[Any] =1 # (0 is vertical, 1 is horizontal)
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataset(_lowerCamelCase , _lowerCamelCase )
print('''Processing...''' )
__SCREAMING_SNAKE_CASE : List[Any] = update_image_and_anno(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for index, image in enumerate(_lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__SCREAMING_SNAKE_CASE : Dict = random_chars(32 )
__SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(F'''{file_root}.jpg''' , image , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(_lowerCamelCase )} with {file_name}''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
for anno in new_annos[index]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(_lowerCamelCase )
        with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
            outfile.write('''\n'''.join(annos_list ) )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for label_file in glob.glob(os.path.join(_lowerCamelCase , '''*.txt''' ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_lowerCamelCase ) as in_file:
__SCREAMING_SNAKE_CASE : List[str] = in_file.readlines()
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(_lowerCamelCase , F'''{label_name}.jpg''' )
__SCREAMING_SNAKE_CASE : List[str] = []
for obj_list in obj_lists:
__SCREAMING_SNAKE_CASE : Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_lowerCamelCase )
labels.append(_lowerCamelCase )
return img_paths, labels
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 1 ):
__SCREAMING_SNAKE_CASE : Dict = []
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : str = []
for idx in range(len(_lowerCamelCase ) ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = img_list[idx]
path_list.append(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = anno_list[idx]
__SCREAMING_SNAKE_CASE : List[str] = cva.imread(_lowerCamelCase )
if flip_type == 1:
__SCREAMING_SNAKE_CASE : str = cva.flip(_lowerCamelCase , _lowerCamelCase )
for bbox in img_annos:
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__SCREAMING_SNAKE_CASE : int = cva.flip(_lowerCamelCase , _lowerCamelCase )
for bbox in img_annos:
__SCREAMING_SNAKE_CASE : Optional[int] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_lowerCamelCase )
new_imgs_list.append(_lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
def _UpperCamelCase ( lowercase__ = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
__SCREAMING_SNAKE_CASE : List[str] = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
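# --- Added illustrative example (not part of the original script) ---
# Minimal sketch of the horizontal-flip bounding-box update used above, with a
# NumPy array standing in for the image. YOLO boxes are (class, x_c, y_c, w, h)
# with coordinates normalized to [0, 1]; a horizontal flip mirrors x_c.
import numpy as np

demo_image = np.arange(12).reshape(3, 4)
demo_flipped = demo_image[:, ::-1]        # same effect as cva.flip(image, 1)

demo_box = (0, 0.25, 0.5, 0.2, 0.4)       # illustrative annotation
demo_flipped_box = (demo_box[0], 1 - demo_box[1], demo_box[2], demo_box[3], demo_box[4])
print(demo_flipped_box)                   # x-center 0.25 becomes 0.75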
| 704
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> Optional[int]:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase__ ) for s in shape] )}.npy'''
def __magic_name__( self :List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Any=(4, 4, 64, 64) , lowerCAmelCase__ :List[Any]=False ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) , dtype=lowerCAmelCase__ )
return image
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :int="CompVis/stable-diffusion-v1-4" ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Optional[int] = '''bf16''' if fpaa else None
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = FlaxUNetaDConditionModel.from_pretrained(
lowerCAmelCase__ , subfolder='''unet''' , dtype=lowerCAmelCase__ , revision=lowerCAmelCase__ )
return model, params
def __magic_name__( self :Any , lowerCAmelCase__ :str=0 , lowerCAmelCase__ :Optional[int]=(4, 77, 768) , lowerCAmelCase__ :str=False ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Any = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) , dtype=lowerCAmelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __magic_name__( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.get_latents(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self.get_encoder_hidden_states(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_latents(lowerCAmelCase__ , shape=(4, 4, 96, 96) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_encoder_hidden_states(lowerCAmelCase__ , shape=(4, 77, 1_024) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : str = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
| 260
| 0
|
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
def A_ ( ):
SCREAMING_SNAKE_CASE:int = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE:Optional[int] = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE:int = get_cluster_input()
return config
def A_ ( snake_case=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE:List[str] = subparsers.add_parser("config" , description=snake_case )
else:
SCREAMING_SNAKE_CASE:int = argparse.ArgumentParser("Accelerate config command" , description=snake_case )
parser.add_argument(
"--config_file" , default=snake_case , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case )
return parser
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Optional[int] = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE:int = args.config_file
else:
if not os.path.isdir(snake_case ):
os.makedirs(snake_case )
SCREAMING_SNAKE_CASE:List[Any] = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(snake_case )
else:
config.to_yaml_file(snake_case )
print(F'''accelerate configuration saved at {config_file}''' )
def A_ ( ):
SCREAMING_SNAKE_CASE:Optional[int] = config_command_parser()
SCREAMING_SNAKE_CASE:List[str] = parser.parse_args()
config_command(snake_case )
if __name__ == "__main__":
main()
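# --- Added illustrative example (not part of the original module) ---
# Tiny sketch of the parser-factory pattern above: the same function either
# registers a subcommand under a parent CLI or builds a standalone parser.
import argparse

def _demo_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description="demo")
    else:
        parser = argparse.ArgumentParser("config", description="demo")
    parser.add_argument("--config_file", default=None)
    if subparsers is not None:
        parser.set_defaults(func=lambda args: print(args.config_file))
    return parser

print(_demo_command_parser().parse_args(["--config_file", "default_config.yaml"]))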
| 143
|
"""simple docstring"""
from __future__ import annotations
def _A( lowerCAmelCase ):
if len(lowerCAmelCase ) == 0:
return []
A__ , A__ : Dict = min(lowerCAmelCase ), max(lowerCAmelCase )
A__ : List[Any] = int(max_value - min_value ) + 1
A__ : list[list] = [[] for _ in range(lowerCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(lowerCAmelCase )
return [v for bucket in buckets for v in sorted(lowerCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
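# --- Added illustrative example (not part of the original snippet) ---
# De-obfuscated sketch of the bucket sort above: each value is binned by its
# offset from the minimum, then the buckets are sorted and concatenated.
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets = [[] for _ in range(bucket_count)]
    for value in my_list:
        buckets[int(value - min_value)].append(value)
    return [v for bucket in buckets for v in sorted(bucket)]

assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]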
| 363
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="unispeech-sat"
def __init__( self , UpperCamelCase_=32 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_="group" , UpperCamelCase_="gelu" , UpperCamelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase_=False , UpperCamelCase_=1_28 , UpperCamelCase_=16 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0.0_5 , UpperCamelCase_=10 , UpperCamelCase_=2 , UpperCamelCase_=0.0 , UpperCamelCase_=10 , UpperCamelCase_=0 , UpperCamelCase_=3_20 , UpperCamelCase_=2 , UpperCamelCase_=0.1 , UpperCamelCase_=1_00 , UpperCamelCase_=2_56 , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_="mean" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2_56 , UpperCamelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCamelCase_=(5, 3, 3, 1, 1) , UpperCamelCase_=(1, 2, 3, 1, 1) , UpperCamelCase_=5_12 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=5_04 , **UpperCamelCase_ , ) -> List[Any]:
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__lowercase : Dict = hidden_size
__lowercase : Optional[int] = feat_extract_norm
__lowercase : List[Any] = feat_extract_activation
__lowercase : List[str] = list(UpperCamelCase_ )
__lowercase : Dict = list(UpperCamelCase_ )
__lowercase : Optional[int] = list(UpperCamelCase_ )
__lowercase : Optional[int] = conv_bias
__lowercase : List[Any] = num_conv_pos_embeddings
__lowercase : Union[str, Any] = num_conv_pos_embedding_groups
__lowercase : int = len(self.conv_dim )
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : Dict = intermediate_size
__lowercase : List[str] = hidden_act
__lowercase : Any = num_attention_heads
__lowercase : Any = hidden_dropout
__lowercase : int = attention_dropout
__lowercase : Optional[int] = activation_dropout
__lowercase : List[Any] = feat_proj_dropout
__lowercase : List[Any] = final_dropout
__lowercase : Tuple = layerdrop
__lowercase : Any = layer_norm_eps
__lowercase : str = initializer_range
__lowercase : int = vocab_size
__lowercase : List[str] = num_clusters
__lowercase : Union[str, Any] = do_stable_layer_norm
__lowercase : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase : int = apply_spec_augment
__lowercase : Union[str, Any] = mask_time_prob
__lowercase : Dict = mask_time_length
__lowercase : str = mask_time_min_masks
__lowercase : Union[str, Any] = mask_feature_prob
__lowercase : int = mask_feature_length
__lowercase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowercase : Dict = num_codevectors_per_group
__lowercase : List[str] = num_codevector_groups
__lowercase : Tuple = contrastive_logits_temperature
__lowercase : Any = feat_quantizer_dropout
__lowercase : Union[str, Any] = num_negatives
__lowercase : Tuple = codevector_dim
__lowercase : str = proj_codevector_dim
__lowercase : int = diversity_loss_weight
# ctc loss
__lowercase : Any = ctc_loss_reduction
__lowercase : Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase : Dict = list(UpperCamelCase_ )
__lowercase : int = list(UpperCamelCase_ )
__lowercase : Dict = list(UpperCamelCase_ )
__lowercase : Optional[int] = xvector_output_dim
@property
def _lowerCamelCase ( self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 703
|
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = len(__UpperCamelCase )
__lowercase : Optional[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero (0) can always be formed by taking no elements, hence True/1
for i in range(arr_len + 1 ):
__lowercase : Optional[Any] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__lowercase : Tuple = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase : Optional[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase : Dict = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
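# --- Added illustrative example (not part of the original snippet) ---
# Runnable sketch of the subset-sum table above: subset[i][j] is True when some
# subset of the first i elements of arr sums exactly to j.
def is_sum_subset(arr, required_sum):
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    for i in range(arr_len + 1):
        subset[i][0] = True  # the empty subset always sums to zero
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False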
| 523
| 0
|
def UpperCamelCase_( ) -> list[list[int]]:
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
_UpperCamelCase = generate_large_matrix()
_UpperCamelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCamelCase_( snake_case__: list[list[int]] ) -> None:
assert all(row == sorted(snake_case__ , reverse=snake_case__ ) for row in grid )
assert all(list(snake_case__ ) == sorted(snake_case__ , reverse=snake_case__ ) for col in zip(*snake_case__ ) )
def UpperCamelCase_( snake_case__: list[int] ) -> int:
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(snake_case__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCAmelCase__ = (left + right) // 2
UpperCAmelCase__ = array[mid]
        # array[mid] is negative and the element before it is non-negative, so mid is the first negative index.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCAmelCase__ = mid + 1
else:
UpperCAmelCase__ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(snake_case__ )
def UpperCamelCase_( snake_case__: list[list[int]] ) -> int:
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(grid[0] )
for i in range(len(snake_case__ ) ):
UpperCAmelCase__ = find_negative_index(grid[i][:bound] )
total += bound
return (len(snake_case__ ) * len(grid[0] )) - total
def UpperCamelCase_( snake_case__: list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def UpperCamelCase_( snake_case__: list[list[int]] ) -> int:
UpperCAmelCase__ = 0
for row in grid:
for i, number in enumerate(snake_case__ ):
if number < 0:
total += len(snake_case__ ) - i
break
return total
def UpperCamelCase_( ) -> None:
from timeit import timeit
print('Running benchmarks' )
UpperCAmelCase__ = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCAmelCase__ = timeit(f"{func}(grid=grid)" , setup=snake_case__ , number=5_00 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
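# --- Added illustrative sanity check (not part of the original snippet) ---
# The three counting strategies above must agree; on the small grid below the
# expected count of negative numbers is 8.
_demo_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert sum(value < 0 for row in _demo_grid for value in row) == 8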
| 146
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=12 , __a=7 , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=512 , __a=0.02 , __a=0 , __a=None , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = projection_dim
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = bos_token_id
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase__ = input_mask.numpy()
UpperCAmelCase__ , UpperCAmelCase__ = input_mask.shape
UpperCAmelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__a ):
UpperCAmelCase__ = 1
UpperCAmelCase__ = 0
UpperCAmelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFBlipTextModel(config=__a )
UpperCAmelCase__ = model(__a , attention_mask=__a , training=__a )
UpperCAmelCase__ = model(__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (TFBlipTextModel,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = BlipTextModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = TFBlipTextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self , __a=True ) -> str:
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=__a )
| 146
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase_ : List[Any] = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowerCAmelCase_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 521
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ : List[Any] = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : str = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowerCAmelCase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 521
| 1
|
"""simple docstring"""
from collections.abc import Sequence
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = False ) -> Optional[int]:
if not arr:
return 0
SCREAMING_SNAKE_CASE__ : List[str] = 0 if allow_empty_subarrays else float("""-inf""" )
SCREAMING_SNAKE_CASE__ : str = 0.0
for num in arr:
SCREAMING_SNAKE_CASE__ : List[Any] = max(0 if allow_empty_subarrays else num , curr_sum + num )
SCREAMING_SNAKE_CASE__ : Dict = max(_snake_case , _snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
a :Tuple = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'{max_subarray_sum(nums) = }')
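# --- Added illustrative example (not part of the original snippet) ---
# De-obfuscated sketch of Kadane's algorithm above; allow_empty_subarrays
# decides whether the empty subarray (sum 0) counts as a valid answer.
def max_subarray_sum(arr, allow_empty_subarrays=False):
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6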
| 680
|
"""simple docstring"""
def lowercase_ ( _snake_case ):
if not head:
return True
# split the list to two parts
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = head.next, head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ : Tuple = fast.next.next
SCREAMING_SNAKE_CASE__ : Optional[int] = slow.next
SCREAMING_SNAKE_CASE__ : List[Any] = slow.next
    SCREAMING_SNAKE_CASE__ : int = None  # detach the first half from the second (the comparison below also works without this)
# reverse the second part
SCREAMING_SNAKE_CASE__ : int = None
while second:
SCREAMING_SNAKE_CASE__ : List[str] = second.next
SCREAMING_SNAKE_CASE__ : List[str] = node
SCREAMING_SNAKE_CASE__ : List[Any] = second
SCREAMING_SNAKE_CASE__ : List[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
SCREAMING_SNAKE_CASE__ : Optional[Any] = node.next
SCREAMING_SNAKE_CASE__ : Any = head.next
return True
def lowercase_ ( _snake_case ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
SCREAMING_SNAKE_CASE__ : Optional[int] = [slow.val]
while slow.next:
SCREAMING_SNAKE_CASE__ : Any = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
SCREAMING_SNAKE_CASE__ : int = cur.next
return True
def lowercase_ ( _snake_case ):
if not head or not head.next:
return True
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
SCREAMING_SNAKE_CASE__ : Any = 0
while head:
if head.val in d:
d[head.val].append(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [pos]
SCREAMING_SNAKE_CASE__ : str = head.next
pos += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = pos - 1
SCREAMING_SNAKE_CASE__ : Dict = 0
for v in d.values():
if len(_snake_case ) % 2 != 0:
middle += 1
else:
SCREAMING_SNAKE_CASE__ : str = 0
for i in range(0 ,len(_snake_case ) ):
if v[i] + v[len(_snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
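# --- Added illustrative example (not part of the original snippet) ---
# Self-contained sketch of the stack-based palindrome check above, with a
# minimal ListNode class since the snippet's node type is not shown.
class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    slow = fast = head            # 1. advance slow to the midpoint
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    stack = [slow.val]            # 2. push the second half onto a stack
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    cur = head                    # 3. compare against the first half
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True

assert is_palindrome_stack(ListNode(1, ListNode(2, ListNode(2, ListNode(1))))) is True
assert is_palindrome_stack(ListNode(1, ListNode(2, ListNode(3)))) is False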
| 223
| 0
|
'''simple docstring'''
from __future__ import annotations
_A : List[str] ='''Muhammad Umer Farooq'''
_A : List[Any] ='''MIT'''
_A : Union[str, Any] ='''1.0.0'''
_A : str ='''Muhammad Umer Farooq'''
_A : Tuple ='''contact@muhammadumerfarooq.me'''
_A : Union[str, Any] ='''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 631
|
'''simple docstring'''
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631
| 1
|
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
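
    # Hedged example (added): "karolin" and "kathrin" differ at three positions.
    print(hamming_distance("karolin", "kathrin"))  # -> 3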
| 676
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Build model inputs by adding <s>...</s> (and </s></s>...</s> for pairs).
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # CamemBERT, like RoBERTa, does not use token type ids: return all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
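

# Hedged usage sketch (added): exercising the fast tokenizer requires network
# access to the Hub; "camembert-base" is the checkpoint named in the maps above.
if __name__ == "__main__":
    tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
    print(tokenizer("J'aime le camembert !")["input_ids"])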
| 8
| 0
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 535
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 535
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __lowerCAmelCase :
'''simple docstring'''
@staticmethod
def _a ( *a , **a ):
"""simple docstring"""
pass
def A ( _A ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__UpperCAmelCase : int = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
a__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _a ( self , a , a , a ):
"""simple docstring"""
snake_case_ :int = pipeline(
"document-question-answering" , model=__lowercase , tokenizer=__lowercase , image_processor=__lowercase )
snake_case_ :int = INVOICE_URL
snake_case_ :Dict = list(zip(*apply_tesseract(load_image(__lowercase ) , __lowercase , "" ) ) )
snake_case_ :Tuple = '''What is the placebo?'''
snake_case_ :Tuple = [
{
'''image''': load_image(__lowercase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :int = dqa_pipeline(__lowercase , top_k=2 )
self.assertEqual(
__lowercase , [
[
{"score": ANY(__lowercase ), "answer": ANY(__lowercase ), "start": ANY(__lowercase ), "end": ANY(__lowercase )},
{"score": ANY(__lowercase ), "answer": ANY(__lowercase ), "start": ANY(__lowercase ), "end": ANY(__lowercase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _a ( self ):
"""simple docstring"""
snake_case_ :Dict = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
snake_case_ :List[Any] = INVOICE_URL
snake_case_ :int = '''How many cats are there?'''
snake_case_ :List[Any] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
snake_case_ :Union[str, Any] = dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(nested_simplify(__lowercase , decimals=4 ) , __lowercase )
snake_case_ :Any = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(__lowercase , decimals=4 ) , __lowercase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
snake_case_ :Optional[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
snake_case_ :Optional[Any] = dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(__lowercase , [] )
# We can optionnally pass directly the words and bounding boxes
snake_case_ :Tuple = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
snake_case_ :Optional[int] = []
snake_case_ :Optional[int] = []
snake_case_ :str = dqa_pipeline(image=__lowercase , question=__lowercase , words=__lowercase , boxes=__lowercase , top_k=2 )
self.assertEqual(__lowercase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[int] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
snake_case_ :Union[str, Any] = INVOICE_URL
snake_case_ :Union[str, Any] = '''What is the invoice number?'''
snake_case_ :Optional[Any] = dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ :Union[str, Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ :int = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _a ( self ):
"""simple docstring"""
snake_case_ :List[str] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
snake_case_ :List[Any] = INVOICE_URL
snake_case_ :List[str] = '''What is the invoice number?'''
snake_case_ :List[Any] = dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ :Any = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ :List[str] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[Any] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowercase )
snake_case_ :List[str] = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowercase , revision="3dc6de3" , )
snake_case_ :Optional[Any] = INVOICE_URL
snake_case_ :Union[str, Any] = '''What is the invoice number?'''
snake_case_ :Dict = dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
snake_case_ :List[str] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
snake_case_ :Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
snake_case_ :Any = list(zip(*apply_tesseract(load_image(__lowercase ) , __lowercase , "" ) ) )
# This model should also work if `image` is set to None
snake_case_ :Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _a ( self ):
"""simple docstring"""
snake_case_ :Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowercase )
snake_case_ :Any = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowercase , revision="3dc6de3" , max_seq_len=50 , )
snake_case_ :Any = INVOICE_URL
snake_case_ :Optional[int] = '''What is the invoice number?'''
snake_case_ :Dict = dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ :Dict = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
snake_case_ :Dict = list(zip(*apply_tesseract(load_image(__lowercase ) , __lowercase , "" ) ) )
# This model should also work if `image` is set to None
snake_case_ :List[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _a ( self ):
"""simple docstring"""
snake_case_ :Union[str, Any] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
snake_case_ :List[str] = INVOICE_URL
snake_case_ :int = '''What is the invoice number?'''
snake_case_ :Any = dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(nested_simplify(__lowercase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _a ( self ):
"""simple docstring"""
pass
| 584
|
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    Each state has exactly len(sequence) - index children and the recursion
    terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
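
# Hedged cross-check (added): the DFS above visits each of the 4! = 24
# orderings of [3, 1, 2, 4] exactly once, matching itertools.permutations.
if __name__ == "__main__":
    from itertools import permutations

    print(len(list(permutations([3, 1, 2, 4]))))  # -> 24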
| 179
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients are stored in order of degree, smallest first."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at x = substitution."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """The derivative of the polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """The integral of the polynomial, with the given integration constant."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
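

# Hedged usage sketch (added): a quick demonstration of the class above; the
# printed forms follow the __str__ convention defined there.
if __name__ == "__main__":
    p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
    q = Polynomial(1, [2, 1])  # 1x + 2
    print(p + q)  # 3x^2 + 1x + 3
    print(p.evaluate(2))  # 3*2**2 + 1 = 13
    print(p.derivative())  # 6x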
| 479
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
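    # Hedged invocation sketch (added): the checkpoint/config paths below are
    # placeholders, not files shipped with this script.
    #
    #   python this_script.py \
    #       --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./pytorch_model.bin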
| 320
|
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f"""{solution() = }""")
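    # Hedged sanity check (added): summing Euler's totient over n = 2..10 gives
    # 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 = 31, so the small case prints 31.
    print(f"{solution(10) = }")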
| 508
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 5
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "xmod"
def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = position_embedding_type
lowerCamelCase__ : str = use_cache
lowerCamelCase__ : Union[str, Any] = classifier_dropout
lowerCamelCase__ : Any = pre_norm
lowerCamelCase__ : Tuple = adapter_reduction_factor
lowerCamelCase__ : Tuple = adapter_layer_norm
lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm
lowerCamelCase__ : Dict = ln_before_adapter
lowerCamelCase__ : List[Any] = list(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = default_language
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 5
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE__:List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def _lowerCamelCase( a , a ):
__a = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__a = "resnet101"
if "dc5" in model_name:
__a = True
__a = "panoptic" in model_name
if is_panoptic:
__a = 2_5_0
else:
__a = 9_1
__a = "huggingface/label-files"
__a = "coco-detection-id2label.json"
__a = json.load(open(hf_hub_download(a , a , repo_type="dataset" ) , "r" ) )
__a = {int(a ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
# load image processor
__a = "coco_panoptic" if is_panoptic else "coco_detection"
__a = ConditionalDetrImageProcessor(format=a )
# prepare image
__a = prepare_img()
__a = image_processor(images=a , return_tensors="pt" )
__a = encoding["pixel_values"]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
__a = torch.hub.load("DeppMeng/ConditionalDETR" , a , pretrained=a ).eval()
__a = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__a = "conditional_detr." + src
rename_key(a , a , a )
__a = rename_backbone_keys(a )
# query, key and value matrices need special treatment
read_in_q_k_v(a , is_panoptic=a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__a = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
__a = state_dict.pop(a )
__a = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__a = state_dict.pop(a )
__a = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
__a = state_dict.pop(a )
__a = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
__a = state_dict.pop(a )
__a = val
# finally, create HuggingFace model and load state dict
__a = ConditionalDetrForSegmentation(a ) if is_panoptic else ConditionalDetrForObjectDetection(a )
model.load_state_dict(a )
model.eval()
model.push_to_hub(repo_id=a , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
__a = conditional_detr(a )
__a = model(a )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
image_processor.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[str] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 528
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 528
| 1
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
snake_case = 4_2
snake_case = 4_2
snake_case = 0.0
snake_case = 1
snake_case = 1
snake_case = True
snake_case = False
snake_case = False
snake_case = False
snake_case = jnp.floataa
def A( self : int ) -> int:
'''simple docstring'''
A = []
A = []
for i in range(self.num_layers ):
A = self.in_channels if i == 0 else self.out_channels
A = FlaxResnetBlockaD(
in_channels=UpperCAmelCase__ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(UpperCAmelCase__ )
A = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(UpperCAmelCase__ )
A = resnets
A = attentions
if self.add_downsample:
A = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : Optional[int] ,_SCREAMING_SNAKE_CASE : Any ,_SCREAMING_SNAKE_CASE : str ,_SCREAMING_SNAKE_CASE : List[Any] ,_SCREAMING_SNAKE_CASE : str=True ) -> Any:
'''simple docstring'''
A = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
A = resnet(UpperCAmelCase__ ,UpperCAmelCase__ ,deterministic=UpperCAmelCase__ )
A = attn(UpperCAmelCase__ ,UpperCAmelCase__ ,deterministic=UpperCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
A = self.downsamplers_a(UpperCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
snake_case = 4_2
snake_case = 4_2
snake_case = 0.0
snake_case = 1
snake_case = True
snake_case = jnp.floataa
def A( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A = []
for i in range(self.num_layers ):
A = self.in_channels if i == 0 else self.out_channels
A = FlaxResnetBlockaD(
in_channels=UpperCAmelCase__ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(UpperCAmelCase__ )
A = resnets
if self.add_downsample:
A = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : Optional[Any] ,_SCREAMING_SNAKE_CASE : Tuple ,_SCREAMING_SNAKE_CASE : List[str]=True ) -> Union[str, Any]:
'''simple docstring'''
A = ()
for resnet in self.resnets:
A = resnet(UpperCAmelCase__ ,UpperCAmelCase__ ,deterministic=UpperCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
A = self.downsamplers_a(UpperCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
snake_case = 4_2
snake_case = 4_2
snake_case = 4_2
snake_case = 0.0
snake_case = 1
snake_case = 1
snake_case = True
snake_case = False
snake_case = False
snake_case = False
snake_case = jnp.floataa
def A( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A = []
A = []
for i in range(self.num_layers ):
A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
A = self.prev_output_channel if i == 0 else self.out_channels
A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(UpperCAmelCase__ )
A = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(UpperCAmelCase__ )
A = resnets
A = attentions
if self.add_upsample:
A = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : Optional[Any] ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : List[Any] ,_SCREAMING_SNAKE_CASE : Optional[Any]=True ) -> Any:
'''simple docstring'''
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
A = res_hidden_states_tuple[-1]
A = res_hidden_states_tuple[:-1]
A = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
A = resnet(UpperCAmelCase__ ,UpperCAmelCase__ ,deterministic=UpperCAmelCase__ )
A = attn(UpperCAmelCase__ ,UpperCAmelCase__ ,deterministic=UpperCAmelCase__ )
if self.add_upsample:
A = self.upsamplers_a(UpperCAmelCase__ )
return hidden_states
class FlaxUpBlock2D(nn.Module):
    r"""Flax 2D upsampling block without attention."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Flax 2D mid block: resnet -> (attention -> resnet) * num_layers."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
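
# A minimal smoke test for the blocks above, assuming the referenced
# FlaxResnetBlock2D / FlaxDownsample2D modules are in scope from the
# surrounding module (activations are NHWC, which is why the skip
# connections concatenate on axis=-1):
if __name__ == "__main__":
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
    sample = jnp.zeros((1, 32, 32, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # time embedding; each resnet projects it, so any width works
    params = block.init(jax.random.PRNGKey(0), sample, temb)
    hidden_states, output_states = block.apply(params, sample, temb)
    # one skip connection per resnet plus one from the downsampler
    assert len(output_states) == 3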
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
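
# A quick sanity check of the renaming logic, traced by hand on a made-up key
# in the upstream YOSO naming scheme (doctest-style so the conversion script
# itself stays the only thing that runs):
#
#     >>> rename_key("model.transformer_3.mha.W_q.weight")
#     'yoso.encoder.layer.3.attention.self.query.weight'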
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
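
# `split_text` drives the passage chunking above; a self-contained check that
# needs no model download:
if __name__ == "__main__":
    print(split_text("one two three four five six", n=2))
    # ['one two', 'three four', 'five six']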
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
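
# `floats_list` is the only randomness helper the dummy inputs rely on; a
# quick standalone check of the nested-list shape it produces:
if __name__ == "__main__":
    dummy = floats_list((2, 3), scale=0.5, rng=random.Random(0))
    assert len(dummy) == 2 and all(len(row) == 3 for row in dummy)
    assert all(0.0 <= value < 0.5 for row in dummy for value in row)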
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
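
# A minimal sketch of wiring the two classes together (the task name follows
# the generic OnnxConfig task list; treat it as an illustrative assumption):
if __name__ == "__main__":
    config = RobertaPreLayerNormConfig(num_hidden_layers=2)
    onnx_config = RobertaPreLayerNormOnnxConfig(config, task="sequence-classification")
    print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask']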
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import operator
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = None ) -> list:
_snake_case = operator.lt if reverse else operator.gt
_snake_case = solution or []
if not arr:
return solution
_snake_case = [arr.pop(0 )]
for i, item in enumerate(lowerCAmelCase_ ):
if _operator(lowerCAmelCase_ , sublist[-1] ):
sublist.append(lowerCAmelCase_ )
arr.pop(lowerCAmelCase_ )
# merging sublist into solution list
if not solution:
solution.extend(lowerCAmelCase_ )
else:
while sublist:
_snake_case = sublist.pop(0 )
for i, xx in enumerate(lowerCAmelCase_ ):
if not _operator(lowerCAmelCase_ , lowerCAmelCase_ ):
solution.insert(lowerCAmelCase_ , lowerCAmelCase_ )
break
else:
solution.append(lowerCAmelCase_ )
strand_sort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
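
# A short demo: the first strand pulled from [4, 3, 5, 1, 2] is [4, 5],
# leaving [3, 1, 2] for the next recursive pass. Note that strand_sort
# consumes its input in place via arr.pop(0):
if __name__ == "__main__":
    sample = [4, 3, 5, 1, 2]
    print(strand_sort(sample))  # [1, 2, 3, 4, 5]
    print(sample)  # [] -- the input list has been emptied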
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
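
# `evaluate` can also be called outside the test harness with exactly the
# signature the tests exercise above -- a minimal sketch:
if __name__ == "__main__":
    state = {"n": 4}
    result = evaluate("m = add_two(n)", {"add_two": add_two}, state=state)
    print(result, state)  # 6 {'n': 4, 'm': 6}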
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(_a ):
for j in range(_a ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [[float("inf" ) for _ in range(_a )] for _ in range(_a )]
for i in range(_a ):
for j in range(_a ):
__SCREAMING_SNAKE_CASE = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_a ):
# looping through rows of graph array
for i in range(_a ):
# looping through columns of graph array
for j in range(_a ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
__SCREAMING_SNAKE_CASE = dist[i][k] + dist[k][j]
_print_dist(_a , _a )
return dist, v
if __name__ == "__main__":
a__ : List[str] = int(input('''Enter number of vertices: '''))
a__ : Any = int(input('''Enter number of edges: '''))
a__ : Tuple = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
a__ : List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
a__ : int = int(input('''Enter source:'''))
a__ : Any = int(input('''Enter destination:'''))
a__ : List[str] = float(input('''Enter weight:'''))
a__ : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
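# A non-interactive sketch of the same routine on a 3-vertex graph
# (0-based vertex indices, unlike the 1-based indices typed into the
# prompts above):
#
#     INF = float("inf")
#     graph = [[0.0, 2.0, INF], [1.0, 0.0, INF], [INF, INF, 0.0]]
#     dist, _ = floyd_warshall(graph, 3)
#     # dist[0][1] == 2.0 and dist[1][0] == 1.0; vertex 2 stays unreachable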
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase =get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = BartphoTokenizer
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase__ ( self ) -> str:
super().setUp()
A = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
A = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
A = {"""unk_token""": """<unk>"""}
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
A = BartphoTokenizer(lowerCamelCase_ ,self.monolingual_vocab_file ,**self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ,**lowerCamelCase_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Optional[Any]:
A = """This is a là test"""
A = """This is a<unk><unk> test"""
return input_text, output_text
def UpperCamelCase__ ( self ) -> int:
A = BartphoTokenizer(lowerCamelCase_ ,self.monolingual_vocab_file ,**self.special_tokens_map )
A = """This is a là test"""
A = """▁This ▁is ▁a ▁l à ▁t est""".split()
A = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
A = tokens + [tokenizer.unk_token]
A = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = DanceDiffusionPipeline
UpperCamelCase : Tuple = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
UpperCamelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
UpperCamelCase : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
UpperCamelCase : Dict = False
UpperCamelCase : Optional[Any] = False
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Tuple = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__UpperCamelCase , use_timestep_embedding=__UpperCamelCase , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
__A : int = IPNDMScheduler()
__A : int = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(__UpperCamelCase ).startswith('mps' ):
__A : str = torch.manual_seed(__UpperCamelCase )
else:
__A : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
__A : List[str] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : List[str] = self.get_dummy_components()
__A : str = DanceDiffusionPipeline(**__UpperCamelCase )
__A : List[Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__A : List[str] = self.get_dummy_inputs(__UpperCamelCase )
__A : int = pipe(**__UpperCamelCase )
__A : Optional[Any] = output.audios
__A : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__A : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = torch_device
__A : Optional[int] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
__A : List[Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__A : int = torch.manual_seed(0 )
__A : str = pipe(generator=__UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
__A : List[Any] = output.audios
__A : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__A : Any = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = torch_device
__A : Union[str, Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
__A : List[str] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__A : Optional[int] = torch.manual_seed(0 )
__A : Dict = pipe(generator=__UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
__A : Union[str, Any] = output.audios
__A : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__A : Any = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
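# Hedged usage sketch (not part of the test suite above): driving the same
# pipeline by hand, assuming the public diffusers API and the
# 'harmonai/maestro-150k' checkpoint already referenced in the slow tests.
import torch
from diffusers import DanceDiffusionPipeline

sketch_pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
sketch_pipe = sketch_pipe.to('cpu')  # switch to 'cuda' when a GPU is available
sketch_generator = torch.manual_seed(0)
# output.audios has shape (batch, channels, samples); 4.096 s matches the tests
sketch_audio = sketch_pipe(
    generator=sketch_generator, num_inference_steps=100, audio_length_in_s=4.096
).audios[0]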
| 714
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A( snake_case__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( _A ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase_ ( self ):
raise NotImplementedError()
| 77
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=False ) -> List[str]:
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowercase (SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = ''''''
else:
SCREAMING_SNAKE_CASE = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
SCREAMING_SNAKE_CASE = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def lowercase (SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = val
def lowercase () -> str:
SCREAMING_SNAKE_CASE = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE = ViTConfig()
SCREAMING_SNAKE_CASE = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = int(vit_name[-12:-10] )
SCREAMING_SNAKE_CASE = int(vit_name[-9:-6] )
else:
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = int(vit_name[-6:-4] )
SCREAMING_SNAKE_CASE = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
SCREAMING_SNAKE_CASE = 1_92
SCREAMING_SNAKE_CASE = 7_68
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 3
elif vit_name[9:].startswith('small' ):
SCREAMING_SNAKE_CASE = 3_84
SCREAMING_SNAKE_CASE = 15_36
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
SCREAMING_SNAKE_CASE = 7_68
SCREAMING_SNAKE_CASE = 23_04
SCREAMING_SNAKE_CASE = 8
SCREAMING_SNAKE_CASE = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
SCREAMING_SNAKE_CASE = 10_24
SCREAMING_SNAKE_CASE = 40_96
SCREAMING_SNAKE_CASE = 24
SCREAMING_SNAKE_CASE = 16
elif vit_name[4:].startswith('huge' ):
SCREAMING_SNAKE_CASE = 12_80
SCREAMING_SNAKE_CASE = 51_20
SCREAMING_SNAKE_CASE = 32
SCREAMING_SNAKE_CASE = 16
# load original model from timm
SCREAMING_SNAKE_CASE = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE = timm_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE = ViTModel(SCREAMING_SNAKE_CASE_ ).eval()
else:
SCREAMING_SNAKE_CASE = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
SCREAMING_SNAKE_CASE = DeiTImageProcessor(size=config.image_size )
else:
SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors='pt' )
SCREAMING_SNAKE_CASE = encoding['''pixel_values''']
SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
if base_model:
SCREAMING_SNAKE_CASE = timm_model.forward_features(SCREAMING_SNAKE_CASE_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.pooler_output , atol=1E-3 )
else:
SCREAMING_SNAKE_CASE = timm_model(SCREAMING_SNAKE_CASE_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
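# Hedged follow-up sketch: once the conversion above has run, the dump folder
# can be reloaded with the standard transformers API. 'path/to/dump' is a
# placeholder for whatever was passed as --pytorch_dump_folder_path.
from transformers import ViTForImageClassification, ViTImageProcessor

# reloaded_model = ViTForImageClassification.from_pretrained('path/to/dump')
# reloaded_processor = ViTImageProcessor.from_pretrained('path/to/dump')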
| 247
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase ( snake_case : BertModel , snake_case : str , snake_case : str ):
_lowerCAmelCase:Dict = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
_lowerCAmelCase:Optional[int] = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(snake_case ):
os.makedirs(snake_case )
_lowerCAmelCase:Dict = model.state_dict()
def to_tf_var_name(snake_case : str ):
for patt, repl in iter(snake_case ):
_lowerCAmelCase:Any = name.replace(snake_case , snake_case )
return F'bert/{name}'
def create_tf_var(snake_case : np.ndarray , snake_case : str , snake_case : tf.Session ):
_lowerCAmelCase:Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase:Any = tf.get_variable(dtype=snake_case , shape=tensor.shape , name=snake_case , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(snake_case )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase:List[str] = to_tf_var_name(snake_case )
_lowerCAmelCase:int = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase:int = torch_tensor.T
_lowerCAmelCase:int = create_tf_var(tensor=snake_case , name=snake_case , session=snake_case )
tf.keras.backend.set_value(snake_case , snake_case )
_lowerCAmelCase:Any = session.run(snake_case )
print(F'Successfully created {tf_name}: {np.allclose(snake_case , snake_case )}' )
_lowerCAmelCase:Optional[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(snake_case , os.path.join(snake_case , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def UpperCAmelCase ( snake_case : Optional[int]=None ):
_lowerCAmelCase:Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=snake_case , required=snake_case , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=snake_case , default=snake_case , required=snake_case , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=snake_case , required=snake_case , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=snake_case , required=snake_case , help='''Directory in which to save tensorflow model''' )
_lowerCAmelCase:Tuple = parser.parse_args(snake_case )
_lowerCAmelCase:Union[str, Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=snake_case , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
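# Hedged invocation sketch for the entry point above, which forwards its
# optional argv list to parser.parse_args(); every path below is a placeholder.
example_argv = [
    '--model_name', 'bert-base-uncased',
    '--pytorch_model_path', '/path/to/pytorch_model.bin',  # placeholder path
    '--tf_cache_dir', '/path/to/tf_dump',  # placeholder path
]
# main(example_argv)  # uncomment to run; requires a TF1-style tf.Session backend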
| 227
| 0
|
'''simple docstring'''
# fmt: off
_snake_case : Dict = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_snake_case : Tuple = {value: key for key, value in MORSE_CODE_DICT.items()}
def _a ( _SCREAMING_SNAKE_CASE : str ):
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def _a ( _SCREAMING_SNAKE_CASE : str ):
return "".join(REVERSE_DICT[char] for char in message.split() )
def _a ( ):
_SCREAMING_SNAKE_CASE = "Morse code here!"
print(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = encrypt(_SCREAMING_SNAKE_CASE )
print(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = decrypt(_SCREAMING_SNAKE_CASE )
print(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
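# Hedged round-trip check using the dictionaries above directly (the helper
# names were mangled): every supported character survives encode -> decode.
example_message = 'SOS 2024'
example_encoded = ' '.join(MORSE_CODE_DICT[ch] for ch in example_message.upper())
example_decoded = ''.join(REVERSE_DICT[code] for code in example_encoded.split())
assert example_decoded == example_message.upper()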
| 493
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , FutureWarning , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 493
| 1
|
"""simple docstring"""
def lowercase_ ( _lowerCamelCase: List[str] ) -> Any:
'''simple docstring'''
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : str = set({"(", "[", "{"} )
__lowerCamelCase : List[str] = set({")", "]", "}"} )
__lowerCamelCase : Tuple = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowerCamelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCamelCase ) == 0 or (len(_lowerCamelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCamelCase ) == 0
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase : int = input("Enter sequence of brackets: " )
if is_balanced(_lowerCamelCase ):
print(_lowerCamelCase , "is balanced" )
else:
print(_lowerCamelCase , "is not balanced" )
if __name__ == "__main__":
main()
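# Hedged worked example of the stack-based check above, inlined so it does not
# depend on the mangled helper name: matching pairs balance, crossings do not.
def example_is_balanced(s: str) -> bool:
    stack, open_to_closed = [], {'{': '}', '[': ']', '(': ')'}
    for ch in s:
        if ch in open_to_closed:
            stack.append(ch)
        elif ch in ')]}' and (not stack or open_to_closed[stack.pop()] != ch):
            return False
    return not stack

assert example_is_balanced('{[()]}') is True
assert example_is_balanced('{[(])}') is False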
| 646
|
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
A__ = data
A__ = previous
A__ = next_node
def __str__( self ):
'''simple docstring'''
return f"""{self.data}"""
def lowercase_ ( self ):
'''simple docstring'''
return self.data
def lowercase_ ( self ):
'''simple docstring'''
return self.next
def lowercase_ ( self ):
'''simple docstring'''
return self.previous
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = head
def __iter__( self ):
'''simple docstring'''
return self
def lowercase_ ( self ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
A__ = self.current.get_data()
A__ = self.current.get_next()
return value
class lowerCAmelCase__ :
def __init__( self ):
'''simple docstring'''
A__ = None # First node in list
A__ = None # Last node in list
def __str__( self ):
'''simple docstring'''
A__ = self.head
A__ = []
while current is not None:
nodes.append(current.get_data() )
A__ = current.get_next()
return " ".join(str(UpperCamelCase__ ) for node in nodes )
def __contains__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.head
while current:
if current.get_data() == value:
return True
A__ = current.get_next()
return False
def __iter__( self ):
'''simple docstring'''
return LinkedListIterator(self.head )
def lowercase_ ( self ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowercase_ ( self ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if self.head is None:
A__ = node
A__ = node
else:
self.insert_before_node(self.head , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.insert_after_node(self.tail , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = Node(UpperCamelCase__ )
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.set_tail(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = node
A__ = node.previous
if node.get_previous() is None:
A__ = node_to_insert
else:
A__ = node_to_insert
A__ = node_to_insert
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = node
A__ = node.next
if node.get_next() is None:
A__ = node_to_insert
else:
A__ = node_to_insert
A__ = node_to_insert
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = 1
A__ = Node(UpperCamelCase__ )
A__ = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCamelCase__ , UpperCamelCase__ )
return
current_position += 1
A__ = node.next
self.insert_after_node(self.tail , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.head
while node:
if node.get_data() == item:
return node
A__ = node.get_next()
raise Exception("Node not found" )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if (node := self.get_node(UpperCamelCase__ )) is not None:
if node == self.head:
A__ = self.head.get_next()
if node == self.tail:
A__ = self.tail.get_previous()
self.remove_node_pointers(UpperCamelCase__ )
@staticmethod
def lowercase_ ( UpperCamelCase__ ):
'''simple docstring'''
if node.get_next():
A__ = node.previous
if node.get_previous():
A__ = node.next
A__ = None
A__ = None
def lowercase_ ( self ):
'''simple docstring'''
return self.head is None
def __a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
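# Hedged mini-example of the doubly linked invariant maintained above: linking
# two nodes must set both the forward and the backward pointer. Shown on a
# tiny standalone node type so it does not depend on the mangled class names.
class _ExampleNode:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

node_a, node_b = _ExampleNode('A'), _ExampleNode('B')
node_a.next, node_b.previous = node_b, node_a  # link A <-> B
assert node_a.next.data == 'B' and node_b.previous.data == 'A'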
| 337
| 0
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def __UpperCAmelCase ( a_: Dict ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def __UpperCAmelCase ( a_: Union[str, Any], a_: List[Any] ):
if args.student_type == "roberta":
_UpperCAmelCase : List[Any] = False
elif args.student_type == "gpt2":
_UpperCAmelCase : Dict = False
def __UpperCAmelCase ( a_: List[Any], a_: Any ):
if args.student_type == "roberta":
_UpperCAmelCase : List[Any] = False
def __UpperCAmelCase ( ):
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path", type=a_, required=a_, help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file", type=a_, required=a_, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
parser.add_argument(
"--student_type", type=a_, choices=["distilbert", "roberta", "gpt2"], required=a_, help="The student type (DistilBERT, RoBERTa).", )
parser.add_argument("--student_config", type=a_, required=a_, help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights", default=a_, type=a_, help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type", choices=["bert", "roberta", "gpt2"], required=a_, help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name", type=a_, required=a_, help="The teacher model." )
parser.add_argument("--temperature", default=2.0, type=a_, help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce", default=0.5, type=a_, help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm", default=0.0, type=a_, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
parser.add_argument("--alpha_clm", default=0.5, type=a_, help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse", default=0.0, type=a_, help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos", default=0.0, type=a_, help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop", default=0.15, type=a_, help="Proportion of tokens for which we need to make a prediction.", )
parser.add_argument("--word_mask", default=0.8, type=a_, help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep", default=0.1, type=a_, help="Proportion of tokens to keep." )
parser.add_argument("--word_rand", default=0.1, type=a_, help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing", default=0.7, type=a_, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
parser.add_argument("--token_counts", type=a_, help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.", )
parser.add_argument(
"--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
parser.add_argument(
"--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", )
parser.add_argument("--n_epoch", type=a_, default=3, help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size", type=a_, default=5, help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.", )
parser.add_argument(
"--gradient_accumulation_steps", type=a_, default=50, help="Gradient accumulation for larger training batches.", )
parser.add_argument("--warmup_prop", default=0.05, type=a_, help="Linear warmup proportion." )
parser.add_argument("--weight_decay", default=0.0, type=a_, help="Weight decay if we apply some." )
parser.add_argument("--learning_rate", default=5e-4, type=a_, help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon", default=1e-6, type=a_, help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm", default=5.0, type=a_, help="Max gradient norm." )
parser.add_argument("--initializer_range", default=0.02, type=a_, help="Random initialization range." )
parser.add_argument(
"--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
parser.add_argument(
"--fp16_opt_level", type=a_, default="O1", help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
), )
parser.add_argument("--n_gpu", type=a_, default=1, help="Number of GPUs in the node." )
parser.add_argument("--local_rank", type=a_, default=-1, help="Distributed training - Local rank" )
parser.add_argument("--seed", type=a_, default=56, help="Random seed" )
parser.add_argument("--log_interval", type=a_, default=500, help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval", type=a_, default=4_000, help="Checkpoint interval." )
_UpperCAmelCase : Any = parser.parse_args()
sanity_checks(a_ )
# ARGS #
init_gpu_params(a_ )
set_seed(a_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path, "parameters.json" ), "w" ) as f:
json.dump(vars(a_ ), a_, indent=4 )
git_log(args.dump_path )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = MODEL_CLASSES[args.student_type]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_UpperCAmelCase : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_UpperCAmelCase : List[str] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_UpperCAmelCase : Tuple = tokenizer.all_special_tokens.index(a_ )
_UpperCAmelCase : int = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
_UpperCAmelCase : Tuple = special_tok_ids
_UpperCAmelCase : Dict = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file, "rb" ) as fp:
_UpperCAmelCase : Dict = pickle.load(a_ )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts, "rb" ) as fp:
_UpperCAmelCase : Optional[Any] = pickle.load(a_ )
_UpperCAmelCase : Union[str, Any] = np.maximum(a_, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_UpperCAmelCase : Tuple = 0.0 # do not predict special tokens
_UpperCAmelCase : Any = torch.from_numpy(a_ )
else:
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Union[str, Any] = LmSeqsDataset(params=a_, data=a_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
_UpperCAmelCase : int = student_config_class.from_pretrained(args.student_config )
_UpperCAmelCase : Optional[Any] = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
_UpperCAmelCase : str = student_model_class.from_pretrained(args.student_pretrained_weights, config=a_ )
else:
_UpperCAmelCase : str = student_model_class(a_ )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
_UpperCAmelCase : List[Any] = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=a_ )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(a_, a_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(a_, a_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = Distiller(
params=a_, dataset=a_, token_probs=a_, student=a_, teacher=a_ )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
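# Hedged launch sketch for the CLI defined above; every path and model name is
# a placeholder. Note the sanity checks: with --mlm, --alpha_mlm must be > 0,
# --alpha_clm must be 0.0, and --token_counts must point to an existing file.
#
#   python train.py --force \
#       --dump_path serialization_dir/my_distilbert \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#       --token_counts data/token_counts.bert-base-uncased.pickle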
| 257
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = XGLMTokenizer
UpperCamelCase_ : int = XGLMTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Tuple = True
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase : List[str] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Dict = "<pad>"
_UpperCAmelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowerCAmelCase__ ) , 1_0_0_8 )
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : Dict = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_UpperCAmelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def _lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name )
_UpperCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = pickle.dumps(lowerCAmelCase__ )
pickle.loads(lowerCAmelCase__ )
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : str = self.get_rust_tokenizer()
_UpperCAmelCase : List[str] = "I was born in 92000, and this is falsé."
_UpperCAmelCase : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCAmelCase : Dict = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Any = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_UpperCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = "Hello World!"
_UpperCAmelCase : Union[str, Any] = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def _lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
_UpperCAmelCase : Dict = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def _lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
# fmt: off
_UpperCAmelCase : Optional[int] = {
"input_ids": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/xglm-564M" , padding=lowerCAmelCase__ , )
| 257
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93
|
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = int(_SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ :int = divmod(_SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(_SCREAMING_SNAKE_CASE ) + str(_SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :str = str(_SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('No input value was provided' )
lowerCAmelCase__ :Dict = '-' if number.startswith('-' ) else ''
lowerCAmelCase__ :int = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return F"{negative}0b{binary_recursive(int(_SCREAMING_SNAKE_CASE ) )}"
if __name__ == "__main__":
from doctest import testmod
testmod()
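# Hedged worked example of the divide-by-two recursion above, inlined so it
# does not depend on the mangled helper name: 13 -> '1101' because
# 13 = 2*6 + 1, 6 = 2*3 + 0, 3 = 2*1 + 1, and 1 maps to '1'.
def example_binary_recursive(n: int) -> str:
    if n in (0, 1):
        return str(n)
    quotient, remainder = divmod(n, 2)
    return example_binary_recursive(quotient) + str(remainder)

assert example_binary_recursive(13) == '1101'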
| 93
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''umt5'''
_UpperCAmelCase = ['''past_key_values''']
def __init__( self , snake_case=250112 , snake_case=512 , snake_case=64 , snake_case=1024 , snake_case=8 , snake_case=None , snake_case=6 , snake_case=32 , snake_case=128 , snake_case=0.1 , snake_case=1E-6 , snake_case=1.0 , snake_case="gated-gelu" , snake_case=True , snake_case=True , snake_case="T5Tokenizer" , snake_case=True , snake_case=0 , snake_case=1 , snake_case=0 , **snake_case , ) -> List[Any]:
super().__init__(
is_encoder_decoder=snake_case , tokenizer_class=snake_case , tie_word_embeddings=snake_case , pad_token_id=snake_case , eos_token_id=snake_case , decoder_start_token_id=snake_case , **snake_case , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = d_model
_UpperCAmelCase = d_kv
_UpperCAmelCase = d_ff
_UpperCAmelCase = num_layers
_UpperCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_UpperCAmelCase = num_heads
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = relative_attention_max_distance
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = feed_forward_proj
_UpperCAmelCase = use_cache
_UpperCAmelCase = self.feed_forward_proj.split('-' )
_UpperCAmelCase = act_info[-1]
_UpperCAmelCase = act_info[0] == 'gated'
if len(snake_case ) > 1 and act_info[0] != "gated" or len(snake_case ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
_UpperCAmelCase = 'gelu_new'
@property
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return self.d_model
@property
def lowerCamelCase_ ( self ) -> Tuple:
return self.num_heads
@property
def lowerCamelCase_ ( self ) -> Optional[int]:
return self.num_layers
class lowercase__ ( A ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCAmelCase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
_UpperCAmelCase = 'past_encoder_sequence + sequence'
_UpperCAmelCase = {0: 'batch'}
_UpperCAmelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowerCamelCase_ ( self ) -> int:
return 13
@property
def lowerCamelCase_ ( self ) -> float:
return 5E-4
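# Hedged instantiation sketch for the configuration above, assuming it is the
# one exported as UMT5Config in recent transformers releases (the mangled
# class name hides the public symbol).
from transformers import UMT5Config

umt5_cfg = UMT5Config(d_model=256, num_heads=4, num_layers=2)
assert umt5_cfg.hidden_size == 256  # the hidden_size property returns d_model
assert umt5_cfg.num_attention_heads == 4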
| 24
|
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase ( A : Path , A : list ):
'''simple docstring'''
_UpperCAmelCase = '\n'.join(A )
Path(A ).open('w' ).writelines(A )
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
| 24
| 1
|
'''simple docstring'''
import math
import os
import sys
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ''
try:
with open(_A , 'rb' ) as binary_file:
_lowerCAmelCase : str = binary_file.read()
for dat in data:
_lowerCAmelCase : Dict = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def lowercase (_A , _A , _A , _A ):
"""simple docstring"""
lexicon.pop(_A )
_lowerCAmelCase : Optional[Any] = last_match_id
if math.loga(_A ).is_integer():
for curr_key in lexicon:
_lowerCAmelCase : int = '0' + lexicon[curr_key]
_lowerCAmelCase : int = bin(_A )[2:]
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = {'0': '0', '1': '1'}
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = '', ''
_lowerCAmelCase : int = len(_A )
for i in range(len(_A ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_lowerCAmelCase : Dict = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_A , _A , _A , _A )
index += 1
_lowerCAmelCase : int = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
_lowerCAmelCase : Any = lexicon[curr_string]
result += last_match_id
return result
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : int = os.path.getsize(_A )
_lowerCAmelCase : int = bin(_A )[2:]
_lowerCAmelCase : Any = len(_A )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : int = 8
try:
with open(_A , 'wb' ) as opened_file:
_lowerCAmelCase : Tuple = [
to_write[i : i + byte_length]
for i in range(0 , len(_A ) , _A )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_A , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : int = read_file_binary(_A )
_lowerCAmelCase : Any = compress_data(_A )
_lowerCAmelCase : Tuple = add_file_length(_A , _A )
write_file_binary(_A , _A )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
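# Hedged worked example of the first stage above (raw bytes -> bit string),
# run against an in-memory buffer instead of a file path for clarity.
example_bytes = b'\x05'
example_bits = ''.join(f'{byte:08b}' for byte in example_bytes)
assert example_bits == '00000101'  # 5 rendered as eight bits, as in f'{dat:08b}'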
| 444
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : int = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ["""OwlViTFeatureExtractor"""]
lowerCAmelCase : int = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 444
| 1
|
'''simple docstring'''
UpperCAmelCase = [0, 2, 4, 6, 8]
UpperCAmelCase = [1, 3, 5, 7, 9]
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
snake_case_ = 0
for digit in range(10 ):
snake_case_ = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return result
snake_case_ = 0
for digita in range(10 ):
snake_case_ = digita
if (remainder + digita) % 2 == 0:
snake_case_ = ODD_DIGITS
else:
snake_case_ = EVEN_DIGITS
for digita in other_parity_digits:
snake_case_ = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
return result
def __lowerCAmelCase (SCREAMING_SNAKE_CASE = 9 )-> int:
"""simple docstring"""
snake_case_ = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(SCREAMING_SNAKE_CASE , 0 , [0] * length , SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
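# Hedged sanity check for the counting above (Project Euler 145): 36 is
# reversible because 36 + 63 = 99 and every digit of the sum is odd; the
# problem statement gives 120 such numbers below one thousand.
assert all(int(c) % 2 == 1 for c in str(36 + 63))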
| 720
|
def __lowerCAmelCase (SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = 7 , SCREAMING_SNAKE_CASE = 100_0000 )-> int:
"""simple docstring"""
snake_case_ = 0
snake_case_ = 1
for current_denominator in range(1 , limit + 1 ):
snake_case_ = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
snake_case_ = current_numerator
snake_case_ = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
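# Hedged cross-check of the search above on a tiny limit: among denominators
# d <= 8, the largest fraction strictly left of 3/7 is 2/5, so the expected
# numerator is 2.
from fractions import Fraction

candidates = [Fraction(d * 3 // 7 - (d % 7 == 0), d) for d in range(1, 9)]
assert max(candidates) == Fraction(2, 5)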
| 531
| 0
|
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int ) -> float:
"""simple docstring"""
return base * power(lowercase__ , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
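# Hedged worked example of the recursion above, inlined because the helper
# name was mangled; negative exponents use the reciprocal trick from the
# __main__ block (the recursion itself only terminates for exponent >= 0).
def example_power(base: float, exponent: int) -> float:
    return base * example_power(base, exponent - 1) if exponent else 1

assert example_power(2, 3) == 8
assert 1 / example_power(2, abs(-3)) == 0.125  # 2 ** -3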
| 56
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Optional[int] =logging.get_logger(__name__)
_UpperCamelCase : Tuple ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase__ ( __snake_case ):
__snake_case : Any = "canine"
def __init__( self ,A__=768 ,A__=12 ,A__=12 ,A__=3072 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=16384 ,A__=16 ,A__=0.02 ,A__=1E-12 ,A__=0 ,A__=0xE_000 ,A__=0xE_001 ,A__=4 ,A__=4 ,A__=8 ,A__=16384 ,A__=128 ,**A__ ,):
super().__init__(pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__ ,**A__ )
_A : int = max_position_embeddings
_A : Dict = hidden_size
_A : List[str] = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[Any] = hidden_act
_A : str = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : Dict = type_vocab_size
_A : Optional[Any] = layer_norm_eps
# Character config:
_A : Union[str, Any] = downsampling_rate
_A : List[str] = upsampling_kernel_size
_A : str = num_hash_functions
_A : Optional[Any] = num_hash_buckets
_A : Optional[Any] = local_transformer_stride
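# Hedged instantiation sketch for the configuration above, assuming the public
# symbol CanineConfig that the mangled class name hides.
from transformers import CanineConfig

canine_cfg = CanineConfig(hidden_size=256, num_attention_heads=4)
assert canine_cfg.downsampling_rate == 4  # character-level default from above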
| 206
| 0
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices,
    return that level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library name."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like `logger.warning()`, but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Like `logger.warning()`, but emits each distinct message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
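

# Minimal usage sketch (assumed, not part of the original module):
#
#     logger = get_logger(__name__)
#     set_verbosity_info()              # or set_verbosity(log_levels["debug"])
#     logger.info("now visible at INFO level")
#     disable_progress_bar()            # swaps the tqdm wrapper above for EmptyTqdm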
| 705
|
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue (and its eigenvector) of
    input_matrix by repeated multiplication, given a starting vector with a
    nonzero component along the dominant eigenvector."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: get eigenvalues and eigenvectors using built-in
        # numpy eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
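    # Hand-checkable example (added, not in the original file): for a diagonal
    # matrix the dominant eigenvalue is its largest diagonal entry, so
    # power_iteration(np.diag([1.0, 2.0, 5.0]), np.ones(3)) converges to ~5.0.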
| 367
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def test_model_common_attributes(self):
        pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 195
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
print(F"""{solution() = }""")
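    # Quick check (added for illustration): below 10 the multiples of 3 or 5 are
    # 3, 5, 6 and 9, so solution(10) == 23.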
| 195
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 16
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system matrix * x = vector with Gaussian elimination
    and back substitution.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # Build the augmented matrix [matrix | vector].
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(data_points: list[int]) -> Callable[[int], int]:
    """
    Return the lowest-degree polynomial function that passes through the
    points (1, data_points[0]), (2, data_points[1]), ...
    """
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The degree-10 generating polynomial u(n) from Project Euler problem 101."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms (FITs) of the optimum polynomials fitted to
    the first 1..order values of func.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
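    # Worked example, hedged, from the Project Euler 101 statement (not from
    # this file): for the cubic u(n) = n**3 the first incorrect terms of the
    # optimum polynomials are 1, 15 and 58, summing to 74; solution() applies
    # the same procedure to the degree-10 question_function above.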
| 16
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 486
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 633
| 0
|
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 718
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sum every node value in a binary tree via depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
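
    # Minimal usage sketch (added, not in the original file):
    tree = Node(10)
    tree.left = Node(5)
    tree.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(tree))))  # prints 12 (= 10 + 5 - 3)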
| 44
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 322
|
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Parse raw DPR training data into an evaluation set and a gold data file."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
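
# Input shape inferred from the loop above (hedged): the source JSON is a list
# of records, each carrying at least a "question" string and a "positive_ctxs"
# list whose items include a "title" field.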
| 322
| 1
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score one candidate: one point per position whose gene matches the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
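

# Worked example (added for illustration): evaluate("abc", "abd") matches the
# target genes at positions 0 and 1 only, so it returns ("abc", 2.0).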
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed new children from parent_1, proportionally to its fitness score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 717
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
UpperCAmelCase__ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : List[Any] = resample if resample is not None else self.resample
UpperCAmelCase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : List[str] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Union[str, Any] = size if size is not None else self.size
UpperCAmelCase__ : Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
UpperCAmelCase__ : Any = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Dict = get_size_dict(UpperCamelCase_ , param_name='crop_size' )
UpperCAmelCase__ : List[str] = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Union[str, Any] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
UpperCAmelCase__ : List[str] = [self.resize(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Optional[Any] = [self.center_crop(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
if do_rescale:
UpperCAmelCase__ : int = [self.rescale(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
if do_normalize:
UpperCAmelCase__ : int = [self.normalize(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for image in images]
UpperCAmelCase__ : Any = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
UpperCAmelCase__ : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
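# --- Hedged usage sketch (added): how the processor above is typically driven.
# The class name `LevitImageProcessor` is an assumption (the record's class
# header is truncated upstream); only the `preprocess` call pattern is taken
# from the code above:
# from PIL import Image
# processor = LevitImageProcessor()                        # hypothetical constructor
# batch = processor.preprocess(Image.new("RGB", (640, 480)), return_tensors="np")
# print(batch["pixel_values"].shape)                       # e.g. (1, 3, 224, 224)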
| 254
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1_000):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case : Optional[int] = bbox[i, j, 3]
snake_case : List[Any] = bbox[i, j, 1]
snake_case : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case : List[Any] = bbox[i, j, 2]
snake_case : Optional[int] = bbox[i, j, 0]
snake_case : str = t
snake_case : Union[str, Any] = None
if self.use_input_mask:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case : Optional[Any] = None
if self.use_token_type_ids:
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : List[Any] = None
snake_case : List[str] = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Union[str, Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
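    # --- Hedged aside (added): the swap loop above orders each box so that
    # x0 <= x1 and y0 <= y1. A vectorized torch equivalent, illustrative only
    # (the helper name `sort_box_corners` is made up, not part of this tester):
    # def sort_box_corners(bbox):                     # bbox: (batch, seq, 4) = (x0, y0, x1, y1)
    #     xs = torch.sort(bbox[..., [0, 2]], dim=-1).values
    #     ys = torch.sort(bbox[..., [1, 3]], dim=-1).values
    #     return torch.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], dim=-1)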
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , ):
"""simple docstring"""
snake_case : Dict = LiltModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case : Optional[int] = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case : Union[str, Any] = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case : List[str] = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , ):
"""simple docstring"""
snake_case : Optional[Any] = self.num_labels
snake_case : List[Any] = LiltForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case : Optional[Any] = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , ):
"""simple docstring"""
snake_case : Tuple = LiltForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case : Tuple = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
A__ : Optional[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ : int = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : Any = False
A__ : Tuple = False
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int ):
"""simple docstring"""
return True
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
snake_case : Optional[Any] = LiltModelTester(self )
snake_case : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : str = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Union[str, Any] = LiltModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
@slow
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
snake_case : Any = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(UpperCAmelCase_ )
snake_case : List[str] = torch.tensor([[1, 2]] , device=UpperCAmelCase_ )
snake_case : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case : str = model(input_ids=UpperCAmelCase_ , bbox=UpperCAmelCase_ )
snake_case : Union[str, Any] = torch.Size([1, 2, 768] )
snake_case : Optional[Any] = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=UpperCAmelCase_ , )
        self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase_ , atol=1e-3 ) )
| 598
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def _a ( self ) -> Any:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase__ = None
if self.use_mc_token_ids:
lowercase__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase__ = ids_tensor([self.batch_size] ,self.num_choices )
lowercase__ = self.get_config()
lowercase__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self ) -> Optional[int]:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,*UpperCAmelCase_ ) -> List[Any]:
lowercase__ = CTRLModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
model(UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,head_mask=UpperCAmelCase_ )
model(UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,*UpperCAmelCase_ ) -> Optional[int]:
lowercase__ = CTRLLMHeadModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,*UpperCAmelCase_ ) -> Optional[int]:
lowercase__ = self.num_labels
lowercase__ = CTRLForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ = model(UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case (UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase__ :Dict = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase__ :int = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ :str = True
lowerCAmelCase__ :Tuple = False
lowerCAmelCase__ :List[Any] = False
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Tuple:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _a ( self ) -> str:
lowercase__ = CTRLModelTester(self )
lowercase__ = ConfigTester(self ,config_class=UpperCAmelCase_ ,n_embd=37 )
def _a ( self ) -> List[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCAmelCase_ )
def _a ( self ) -> Optional[Any]:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self ) -> Tuple:
pass
@slow
def _a ( self ) -> List[str]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = CTRLModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _a ( self ) -> Tuple:
pass
@require_torch
class snake_case (unittest.TestCase ):
def _a ( self ) -> int:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _a ( self ) -> int:
lowercase__ = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(UpperCAmelCase_ )
lowercase__ = torch.tensor(
[[11_859, 0, 1_611, 8]] ,dtype=torch.long ,device=UpperCAmelCase_ ) # Legal the president is
lowercase__ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase__ = model.generate(UpperCAmelCase_ ,do_sample=UpperCAmelCase_ )
self.assertListEqual(output_ids[0].tolist() ,UpperCAmelCase_ )
| 267
| 0
|
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
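# --- Hedged worked example (added): inverting P_k = k(3k - 1) / 2 with the
# quadratic formula gives k = (1 + sqrt(1 + 24 * P)) / 6, which is exactly the
# integrality test in is_pentagonal above. Quick sanity checks on known values:
assert is_pentagonal(1) and is_pentagonal(5) and is_pentagonal(12)
assert not is_pentagonal(2) and not is_pentagonal(6)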
if __name__ == "__main__":
print(F'''{solution() = }''')
| 702
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Tuple:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> Optional[Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = text_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = [text_path]
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=("train",) ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
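# --- Hedged usage sketch (added): the reader under test, outside pytest.
# The file name "examples.txt" is illustrative; any plain-text file with one
# example per line works, and the read() / column_names calls follow the
# assertions above:
# from datasets.io.text import TextDatasetReader
# dataset = TextDatasetReader("examples.txt", cache_dir="/tmp/text_cache").read()
# print(dataset.column_names)    # ["text"]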
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({'''train''': text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> str:
__a = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({'''train''': text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
if split:
__a = {split: text_path}
else:
__a = '''train'''
__a = {'''train''': text_path, '''test''': text_path}
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 65
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2_0_4_8,
}
class XGLMTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
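    # --- Hedged aside (added): with fairseq_offset = 1, ordinary pieces map to
    # spm_id + 1 and the madeup words land just past the spm vocabulary, e.g.
    # >>> sp_size, offset = 8, 1                      # illustrative sizes only
    # >>> {f"<madeupword{i}>": sp_size + i + offset for i in range(2)}
    # {'<madeupword0>': 9, '<madeupword1>': 10}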
    def __getstate__(self) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens) -> str:
        '''simple docstring'''
        return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 502
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: Union[str, Any] , *_SCREAMING_SNAKE_CASE: Any , **_SCREAMING_SNAKE_CASE: int) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Union[str, Any] , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: int , *_SCREAMING_SNAKE_CASE: Any , **_SCREAMING_SNAKE_CASE: str) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
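# --- Hedged sketch (added): the minimal pattern these dummies follow, with
# made-up names (`_RequiresBackendsMeta`, `OnnxDummy`); any touch of such a
# class raises a clear "missing backend" error instead of a confusing failure
# at module import time:
# class _RequiresBackendsMeta(type):
#     def __getattr__(cls, name):
#         raise ImportError(f"{cls.__name__} requires torch, transformers and onnx")
# class OnnxDummy(metaclass=_RequiresBackendsMeta):
#     def __init__(self, *args, **kwargs):
#         raise ImportError(f"{type(self).__name__} requires torch, transformers and onnx")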
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: Tuple , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: int) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: int , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: int) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: Any , **_SCREAMING_SNAKE_CASE: Any) -> str:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[Any] , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: int) -> int:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: Tuple , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[str] , *_SCREAMING_SNAKE_CASE: Tuple , **_SCREAMING_SNAKE_CASE: int) -> int:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[int] , *_SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Any) -> int:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: Optional[Any] , *_SCREAMING_SNAKE_CASE: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> str:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[int] , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Tuple) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: int , *_SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: List[Any] , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: List[str]) -> int:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Union[str, Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: str , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
| 293
| 0
|
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    '''simple docstring'''
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')
def md_prefix(i: int) -> str:
    '''simple docstring'''
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    '''simple docstring'''
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    '''simple docstring'''
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(' ', '%20')
        filename = os.path.splitext(filename.replace('_', ' ').title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
    print_directory_md(".")
| 541
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
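# --- Hedged aside (added): what the _LazyModule wiring above buys. A minimal
# PEP 562 (module-level __getattr__) equivalent for a package __init__,
# illustrative only and not part of this module:
# import importlib
# _import_structure = {"modeling_swin": ["SwinModel"]}
# def __getattr__(name):
#     for submodule, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module("." + submodule, __name__), name)
#     raise AttributeError(name)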
| 541
| 1
|
def is_pentagonal(n: int) -> bool:
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5_000) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 395
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = """luke"""
    def __init__(self, vocab_size=5_0267, entity_vocab_size=50_0000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
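# --- Hedged usage sketch (added): constructing the config above directly.
# The override value is illustrative only:
# config = LukeConfig(entity_vocab_size=10)
# assert config.model_type == "luke" and config.entity_emb_size == 256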
| 164
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a_ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_A : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_A : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_A : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __UpperCamelCase (self ):
snake_case_ : str = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
snake_case_ : Any = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
snake_case_ : Dict = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
snake_case_ : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
snake_case_ : Tuple = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
snake_case_ : Optional[Any] = text_classifier("""This is great !""" , return_all_scores=lowercase__ )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
snake_case_ : List[Any] = text_classifier("""This is great !""" , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
snake_case_ : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
snake_case_ : Optional[int] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def __UpperCamelCase (self ):
import torch
snake_case_ : Optional[int] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
snake_case_ : Optional[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def __UpperCamelCase (self ):
snake_case_ : str = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
snake_case_ : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def __UpperCamelCase (self ):
snake_case_ : Tuple = pipeline("""text-classification""" )
snake_case_ : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
snake_case_ : List[Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
snake_case_ : List[str] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def __UpperCamelCase (self ):
snake_case_ : int = pipeline("""text-classification""" , framework="""tf""" )
snake_case_ : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
snake_case_ : List[Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
snake_case_ : Optional[int] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : int = TextClassificationPipeline(model=lowercase__ , tokenizer=lowercase__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Dict = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
snake_case_ : Union[str, Any] = """HuggingFace is in"""
snake_case_ : str = text_classifier(lowercase__ )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
snake_case_ : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
snake_case_ : List[str] = text_classifier(lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}, {"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
snake_case_ : List[Any] = text_classifier(lowercase__ , top_k=lowercase__ )
snake_case_ : int = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowercase__ ) , [[{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] * N, [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] * N] , )
snake_case_ : List[str] = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
snake_case_ : Optional[int] = text_classifier(lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , {"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
snake_case_ : Optional[int] = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(lowercase__ ):
text_classifier(lowercase__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
snake_case_ : Union[str, Any] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
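# --- Hedged usage sketch (added): why to_dict() is overridden above. The class
# name `UperNetConfig` is inferred from model_type = "upernet"; the call chain
# itself follows this record:
# config = UperNetConfig()                 # falls back to the default ResNet backbone
# payload = config.to_dict()
# assert payload["backbone_config"]["model_type"] == "resnet"
# assert payload["model_type"] == "upernet"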
| 48
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
A__: int = logging.get_logger(__name__)
def get_detr_config(model_name: str) -> tuple:
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("""microsoft/resnet-50""")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("""microsoft/resnet-101""")
    else:
        raise ValueError("""Model name should include either resnet50 or resnet101""")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = """panoptic""" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
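# --- Hedged usage sketch (added): driving the config builder above. The model
# name below is illustrative; any name containing "resnet-50" or "resnet-101"
# (and optionally "panoptic") selects the matching backbone and label count.
# Left commented because the label-file branch downloads from the Hub:
# config, is_panoptic = get_detr_config("detr-resnet-50-panoptic")
# print(type(config).__name__, config.num_labels, is_panoptic)   # DetrConfig 250 True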
def create_rename_keys(config) -> list:
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
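        # Note: 256 is DETR's hidden size; PyTorch's nn.MultiheadAttention stores the
        # stacked q/k/v projection as one (3 * 256, 256) in_proj matrix, so the row
        # slices [:256], [256:512] and [-256:] recover the query, key and value weights.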
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
A__: int = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
A__: Tuple = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
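# Example invocation (sketch; the script filename and output path are hypothetical):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path /tmp/detr-resnet-50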
| 694
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
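# Usage note (sketch): this module appears to be accelerate's `utils` package __init__;
# the re-exports above let callers write, e.g.,
#   from accelerate.utils import set_seed, send_to_device
# instead of importing from the individual submodules.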
| 694
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])
return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched input (PIL images)
        images, maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, maps, return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
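        # Note: with do_reduce_labels=True the ADE20k background id 0 is remapped to 255
        # (the ignore index) and every class id is shifted down by one, which is why the
        # observed maximum jumps from 150 to 255 above.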
| 283
|
"""simple docstring"""
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)

        # Recursively sort the first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort the last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort the first 2/3 elements again
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
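# Worked example (sketch): stooge_sort([2, 4, 5, 3, 1])
#   arr[0] = 2 > arr[4] = 1, so the ends swap -> [1, 4, 5, 3, 2]; t = int(5 / 3) = 1.
#   The three recursive calls then sort indices 0..3, 1..4, and 0..3 again,
#   yielding [1, 2, 3, 4, 5]. Runtime is O(n^(log 3 / log 1.5)) ~ O(n^2.71).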
| 283
| 1
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
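# Usage sketch: num_hidden_layers is derived rather than passed in; with the default
# num_block_repeats of [1, 2, 2, 3, 3, 4, 1] it equals sum([1, 2, 2, 3, 3, 4, 1]) * 4 = 16 * 4 = 64.
#   config = EfficientNetConfig()
#   assert config.num_hidden_layers == 64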
| 31
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in the given modeling source strings."""
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"config.{attribute}" in modeling_source
or f"getattr(config, \"{attribute}\"" in modeling_source
or f"getattr(self.config, \"{attribute}\"" in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
elif (
re.search(
Rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , SCREAMING_SNAKE_CASE , )
is not None
):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class.__init__` arguments that are unused in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check, for every configuration class, that its `__init__` arguments are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
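# Example failure (sketch, with a hypothetical config class): if `FooConfig.__init__`
# accepted an argument `unused_attr` that never appears in any modeling_*.py file,
# the run would end with:
#   ValueError: The following configuration classes contain unused attributes in the corresponding modeling files:
#   FooConfig: ['unused_attr']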
| 203
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state, char) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
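# Example usage (sketch): overlapping matches are reported by start index.
#   >>> A = Automaton(["what", "hat", "ver", "er"])
#   >>> A.search_in("whatever, err ... , wherever")
#   {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}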
if __name__ == "__main__":
import doctest
doctest.testmod()
| 117
|
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _lowerCAmelCase ( snake_case_ ):
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : List[Any] = tempfile.mkdtemp()
snake_case : Union[str, Any] = 8
# DPR tok
snake_case : str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case : Union[str, Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
snake_case : Union[str, Any] = os.path.join(UpperCamelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
snake_case : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case : Tuple = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case : Optional[Any] = {"unk_token": "<unk>"}
snake_case : Tuple = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
snake_case : Optional[Any] = os.path.join(UpperCamelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
snake_case : int = os.path.join(UpperCamelCase__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase__ ) )
def lowerCamelCase ( self ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowerCamelCase ( self ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowerCamelCase ( self ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Dict = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[str] = self.get_dummy_dataset()
snake_case : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case : int = dataset
snake_case : int = RagRetriever(
UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = self.get_dummy_dataset()
snake_case : Any = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
snake_case : str = os.path.join(self.tmpdirname , "dataset" )
snake_case : Any = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
snake_case : Any = RagRetriever(
UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case : str = RagRetriever(
UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCamelCase__ ) , )
return retriever
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case : Dict = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
snake_case : int = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
snake_case : Any = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(UpperCamelCase__ , open(UpperCamelCase__ , "wb" ) )
snake_case : List[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
snake_case : Dict = RagRetriever(
UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : str = 1
snake_case : Any = self.get_dummy_canonical_hf_index_retriever()
snake_case : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case ,snake_case ,snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : List[str] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(UpperCamelCase__ )
snake_case : Union[str, Any] = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case : List[Any] = retriever.retrieve(UpperCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = 1
snake_case : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
snake_case : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case ,snake_case ,snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCamelCase__ )
snake_case : int = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case : int = retriever.retrieve(UpperCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = 1
snake_case : int = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
snake_case : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case ,snake_case ,snake_case : List[str] = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCamelCase__ )
snake_case : Any = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Union[str, Any] = 1
snake_case : Tuple = self.get_dummy_legacy_index_retriever()
snake_case : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case ,snake_case ,snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , UpperCamelCase__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : int = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCamelCase__ )
snake_case : Tuple = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
snake_case : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case : Optional[Any] = retriever.retrieve(UpperCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
snake_case : str = 1
snake_case : Dict = self.get_dummy_canonical_hf_index_retriever()
snake_case : str = [[5, 7], [10, 11]]
snake_case : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case : int = retriever(UpperCamelCase__ , UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ )
snake_case ,snake_case ,snake_case : Dict = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
snake_case : Tuple = retriever(
UpperCamelCase__ , UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ , return_tensors="pt" , )
snake_case ,snake_case ,snake_case ,snake_case : str = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        # from_disk=False is an assumption here; the dump elided this flag.
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
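
# Minimal sketch (illustrative, not part of the test suite above) of the
# maximum-inner-product search the dummy index performs: the all-ones query
# scores highest against doc 1 ("bar") and the negated query against doc 0
# ("foo"), which is exactly what the doc_ids assertion checks.
def mips_top_docs(query, doc_embeds, n_docs=1):
    scores = doc_embeds @ query  # plain dot products, no normalization
    return np.argsort(-scores)[:n_docs]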
| 117
| 1
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
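
# Illustrative check (not part of the tests above): ALBERT shares encoder weights
# across layers, so its parameter count depends on num_hidden_groups rather than
# num_hidden_layers. The config values below are small, arbitrary choices.
def albert_param_count(num_hidden_layers: int, num_hidden_groups: int) -> int:
    config = AlbertConfig(
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=num_hidden_layers,
        num_hidden_groups=num_hidden_groups,
        num_attention_heads=6,
        intermediate_size=37,
    )
    return sum(p.numel() for p in AlbertModel(config).parameters())

# e.g. albert_param_count(6, 1) == albert_param_count(12, 1) holds, because the
# extra layers reuse the same shared weights.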
| 5
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
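
# The same deprecation-shim pattern in miniature (illustrative names, not
# transformers code): subclass the replacement, warn once on construction,
# and delegate everything else unchanged.
class NewProcessor:
    """Stand-in for the maintained class."""

    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept only for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)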
| 5
| 1
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
A_ : Any = (7_20, 12_80) # Height, Width
A_ : List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
A_ : int = 1 / 1_00
A_ : Dict = ""
A_ : List[str] = ""
A_ : str = ""
A_ : int = 2_50
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
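
# For reference: each YOLO label line parsed above has the form
#   "<class_id> <x_center> <y_center> <width> <height>"
# with all coordinates normalized to [0, 1]; get_dataset converts every line
# into corner format [class_id, xmin, ymin, xmax, ymax] for the mosaic below.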
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 696
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
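
# A miniature of the replicate/shard/pmap pattern used throughout the tests
# above (illustrative; kept as comments so the file imports even without JAX):
#
#   import jax
#   import jax.numpy as jnp
#
#   @jax.pmap
#   def scale(params, x):
#       return params["w"] * x
#
#   n = jax.device_count()
#   params = jax.device_put_replicated({"w": jnp.float32(2.0)}, jax.devices())
#   x = jnp.arange(n, dtype=jnp.float32).reshape(n, 1)  # one row per device
#   assert scale(params, x).shape == (n, 1)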
| 696
| 1
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
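
# Illustrative note (not part of the conversion logic): the mapping above is
# applied by substring replacement, so several entries can fire on one key, e.g.
#   "image_encoder.blocks.0.norm"
#   -> "vision_encoder.blocks.0.norm"        ("image_encoder" -> "vision_encoder")
#   -> "vision_encoder.layers.0.norm"        ("blocks" -> "layers")
#   -> "vision_encoder.layers.0.layer_norm"  (".norm" -> ".layer_norm")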
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712603092193604

    input_boxes = ((75, 275, 1725, 850),)
    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686015605926514

    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 99
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    # The four boolean attributes below were anonymized in the source; these
    # names follow the usual ModelTesterMixin flags and are an assumption.
    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 243
| 0
|
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
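
# The round-robin shard assignment exercised above, in isolation (illustrative):
# worker i of n takes partitions i, i + n, i + 2n, ...
def assigned_partitions(worker_id: int, num_workers: int, num_partitions: int) -> list:
    return list(range(worker_id, num_partitions, num_workers))


assert assigned_partitions(0, 2, 4) == [0, 2]
assert assigned_partitions(1, 2, 4) == [1, 3]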
| 706
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '<d>',
            '</d>',
            '<s>',
            '</s>',
            '</_>',
            '<unk>',
            '<pad>',
            '</n>',
            '我',
            '是',
            'C',
            'P',
            'M',
            'A',
            'n',
            't',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
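
# Illustrative helper (not part of the test): the vocab file written in setUp is
# the plain one-token-per-line format, so a token's id is simply its line number.
def load_vocab(vocab_file: str) -> dict:
    with open(vocab_file, encoding='utf-8') as reader:
        return {token.rstrip('\n'): index for index, token in enumerate(reader)}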
| 536
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
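
# Illustrative usage sketch (assumed names, not transformers code): an ONNX
# exporter consumes the properties above roughly like this.
#
#   onnx_config = YolosOnnxConfig(config)
#   torch.onnx.export(
#       model,
#       (dummy_pixel_values,),
#       "yolos.onnx",
#       input_names=list(onnx_config.inputs.keys()),
#       dynamic_axes=dict(onnx_config.inputs),
#       opset_version=onnx_config.default_onnx_opset,
#   )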
| 78
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__A : Dict = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__A, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
| 177
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, sp_model_kwargs=None, legacy=True, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy, **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: re.search(r"<extra_id_\d+>", token) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text, **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
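# Sketch of the sentinel-id arithmetic used in `_convert_token_to_id` above:
# <extra_id_*> tokens occupy the top of the vocabulary, so <extra_id_0> maps
# to vocab_size - 1, <extra_id_1> to vocab_size - 2, and so on. The helper
# name is ours; the figures assume the stock T5 vocab (32100 ids, 100 extras).
def _extra_id_to_index(vocab_size, num):
    return vocab_size - num - 1

assert _extra_id_to_index(32100, 0) == 32099  # <extra_id_0> in the stock T5 vocab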
| 433
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
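# Vectorized sketch of the "ensure that bbox is legal" fix-up done with nested
# loops in `prepare_config_and_inputs` above: swap coordinates so x0 <= x1 and
# y0 <= y1 for every box. Assumes torch is importable and a [batch, seq_len, 4]
# tensor; the helper name is ours, for illustration.
def _legalize_bbox(bbox):
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)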
| 433
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
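# Sketch of the per-layer past_key_values shape assembled by the dummy-input
# helpers above: (batch, num_heads, past_seq_len, head_dim) with
# head_dim = d_model // num_heads. Pure arithmetic; the helper name is ours.
def _past_kv_shape(batch, num_heads, past_len, d_model):
    return (batch, num_heads, past_len, d_model // num_heads)

assert _past_kv_shape(2, 16, 10, 1024) == (2, 16, 10, 64)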
| 131
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
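# Companion sketch (assumes qiskit is installed, as above): the same
# measurement without the X gates leaves the register in the ground state,
# so all 1000 shots land on '00', i.e. counts == {'00': 1000}.
def ground_state_counts(shots: int = 1000):
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.measure([0, 1], [0, 1])
    backend = qiskit.Aer.get_backend("aer_simulator")
    return qiskit.execute(circuit, backend, shots=shots).result().get_counts(circuit)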
| 506
| 0
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
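# Sketch of the inactivity windows encoded in the conditionals above: the bot
# posts its notice after 23 days without updates (on issues at least 30 days
# old) and closes 7 days after its own notice. The helper name is ours.
def is_stale(days_since_update: int, days_since_creation: int) -> bool:
    return days_since_update > 23 and days_since_creation >= 30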
| 703
|
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
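# Iterative equivalent of `dfs_recursive` above, using an explicit stack; a
# sketch for graphs deep enough to exhaust Python's recursion limit. The
# function name is ours, added for illustration.
def dfs_iterative(graph: dict, start: int) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so pop order matches the recursive version
        stack.extend(reversed(graph.get(node, [])))
    return order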
| 423
| 0
|