| column | type | range |
|---|---|---|
| code | string | lengths 82 to 53.2k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
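# To run just this file's tests (the path assumes the usual transformers repo
# layout, which is not stated here):
#   python -m pytest tests/models/regnet/test_modeling_tf_regnet.py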
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
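# Note (added): with the _LazyModule registered in sys.modules above, importing
# this package stays cheap; a submodule such as modeling_xlnet is only imported
# the first time one of its names (e.g. XLNetModel) is actually accessed.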
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertAlmostEqual(a.component(2, 1), 7, delta=0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
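# Sanity check (added) for test_determinant, expanding along the first row of
# [[1, 2, 3], [2, 4, 5], [6, 7, 8]]:
#   det = 1*(4*8 - 5*7) - 2*(2*8 - 5*6) + 3*(2*7 - 4*6) = -3 + 28 - 30 = -5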
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
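# Illustrative trace (added) for move_tower(2, "A", "B", "C"):
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B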
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
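# Example run (added), using the test tables defined at the top of this file;
# any truthy keyword (e.g. describe=True) also prints the resource tables first:
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)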
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
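# Why the 6k +/- 1 step in is_prime works (added): every integer is 6k + r with
# r in {0, 1, 2, 3, 4, 5}; r = 0, 2, 4 give even numbers and r = 3 gives a
# multiple of 3, so any prime > 3 must leave r = 1 or r = 5.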
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
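# Worked example (added): rotate_90([[1, 2], [3, 4]])
#   transpose   -> [[1, 3], [2, 4]]
#   reverse_row -> [[2, 4], [1, 3]]
# i.e. one 90-degree counterclockwise turn, matching the labels printed above.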
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=UP, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
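# The scene can be rendered with the manim CLI, e.g. `manim -pql stage_1.py Stage1`;
# both the file name and the restored class name Stage1 are assumptions here.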
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __lowerCAmelCase ( lowercase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case ,snake_case : List[str] = image.size
snake_case ,snake_case : int = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
snake_case : Any = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
snake_case : Dict = np.array(lowercase ).astype(np.floataa ) / 255.0
snake_case : List[str] = image[None].transpose(0 , 3 , 1 , 2 )
snake_case : List[Any] = torch.from_numpy(lowercase )
return 2.0 * image - 1.0
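# Shape/range check for preprocess (added, illustrative): a 511x383 RGB PIL image
# is snapped down to 480x352 (multiples of 32) and returned as a (1, 3, 352, 480)
# float32 tensor rescaled from [0, 255] to [-1, 1].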
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
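# Usage sketch (added; the checkpoint name is an assumption, not taken from this file):
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]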
"""simple docstring"""
from __future__ import annotations
def lowercase (_snake_case ,_snake_case ) -> bool:
'''simple docstring'''
if len(_snake_case ) == 0:
return False
__UpperCamelCase = len(_snake_case ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] ,_snake_case )
else:
return binary_search(a_list[midpoint + 1 :] ,_snake_case )
if __name__ == "__main__":
_A = input("Enter numbers separated by comma:\n").strip()
_A = [int(item.strip()) for item in user_input.split(",")]
_A = int(input("Enter the number to be found in the list:\n").strip())
_A = "" if binary_search(sequence, target) else "not "
print(f"""{target} was {not_str}found in {sequence}""")
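# Variant sketch (added, not part of the original): passing index bounds avoids
# the O(n) list copies made by the slices a_list[:midpoint] / a_list[midpoint + 1:].
def binary_search_by_index(a_list: list[int], item: int, lo: int = 0, hi: int | None = None) -> bool:
    if hi is None:
        hi = len(a_list)
    if lo >= hi:  # empty search range
        return False
    midpoint = (lo + hi) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search_by_index(a_list, item, lo, midpoint)
    return binary_search_by_index(a_list, item, midpoint + 1, hi)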
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] , A_ : str )-> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
__UpperCamelCase = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def A ( self : Tuple )-> int:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict )-> int:
__UpperCamelCase = "sgugger/tiny-distilbert-classification"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , only_pretrain_model=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , torchscript=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def A ( self : Optional[Any] )-> Union[str, Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , fpaa=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict )-> Tuple:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
__UpperCamelCase = None
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Union[str, Any] )-> str:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def A ( self : List[Any] )-> List[Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A_ , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Tuple )-> Union[str, Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Any )-> List[str]:
__UpperCamelCase = "sshleifer/tinier_bart"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Tuple )-> Optional[int]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = "sshleifer/tinier_bart"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : int )-> Optional[Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , save_to_csv=A_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A_ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(A_ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(A_ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(A_ , "train_time.csv" ) , env_info_csv_file=os.path.join(A_ , "env.csv" ) , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "env.csv" ) ).exists() )
def A ( self : List[Any] )-> str:
__UpperCamelCase = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(A_ : List[str] ):
self.assertTrue(hasattr(A_ , "sequential" ) )
self.assertTrue(hasattr(A_ , "cumulative" ) )
self.assertTrue(hasattr(A_ , "current" ) )
self.assertTrue(hasattr(A_ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A_ , "log.txt" ) , log_print=A_ , trace_memory_line_by_line=A_ , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A_ , "log.txt" ) ).exists() )
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
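# Worked example (added): check_anagrams("Silent", "Listen") -> True; both
# normalize to 6 lowercase letters and every character's net count ends at 0.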
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_snake_case , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(_snake_case , 'num_encoder_blocks' ) )
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , _snake_case : str , _snake_case : Union[str, Any]=13 , _snake_case : Any=64 , _snake_case : Optional[Any]=3 , _snake_case : Dict=4 , _snake_case : Tuple=[2, 2, 2, 2] , _snake_case : str=[8, 4, 2, 1] , _snake_case : Union[str, Any]=[16, 32, 64, 1_28] , _snake_case : int=[1, 4, 8, 16] , _snake_case : List[str]=[1, 2, 4, 8] , _snake_case : int=True , _snake_case : int=True , _snake_case : Union[str, Any]="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=0.02 , _snake_case : Tuple=3 , _snake_case : int=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_encoder_blocks
A__ = sr_ratios
A__ = depths
A__ = hidden_sizes
A__ = downsampling_rates
A__ = num_attention_heads
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = num_labels
A__ = scope
def _a ( self : int ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : int ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _a ( self : int , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Any ):
"""simple docstring"""
A__ = SegformerModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _a ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Dict ):
"""simple docstring"""
A__ = self.num_labels
A__ = SegformerForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : List[str] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = 1
A__ = SegformerForSemanticSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_snake_case )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ : Optional[Any] = True
A__ : str = False
A__ : Tuple = False
A__ : Dict = False
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = SegformerModelTester(self )
A__ = SegformerConfigTester(self , config_class=_snake_case )
def _a ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_snake_case )
@unittest.skip('SegFormer does not use inputs_embeds' )
def _a ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(_snake_case ) , _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ = len(_snake_case )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Dict , _snake_case : int , _snake_case : List[Any] ):
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ):
continue
A__ = model_class(_snake_case )
model.to(_snake_case )
model.train()
A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
A__ = model(**_snake_case ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
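Outside the test harness, the same inference flow looks roughly like this (a minimal sketch, assuming network access to the nvidia/segformer-b0-finetuned-ade-512-512 checkpoint and the COCO demo image):

import requests
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# logits come out at 1/4 of the input resolution; post-processing upsamples
# them back to the original image size and takes the per-pixel argmax
segmentation = image_processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # (height, width) map of class ids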
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
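For illustration, here is one hypothetical fairseq-style key run through the renamer above (the exact key names depend on the checkpoint, so treat the input as an assumption):

example_key = "transformer.layers.0.cross_attention.out_proj.weight"
print(rename_keys(example_key))
# -> model.decoder.layers.0.encoder_attn.out_proj.weight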
def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
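As a quick illustration, the config builder can be exercised on its own (a sketch; the sizes follow the branches above):

config = decoder_config_from_checkpoint("small")
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 1024 24 16
print(config.ffn_dim)  # 4096, i.e. hidden_size * 4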
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__UpperCAmelCase = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
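For reference, a typical invocation of this script looks roughly like the following (the script filename is an assumption, and audiocraft must be installed so the checkpoint can be fetched):

# python convert_musicgen_transformers.py \
#     --checkpoint small \
#     --pytorch_dump_folder ./musicgen-small-hf \
#     --device cpu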
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = '''RegNetConfig'''
# Base docstring
__UpperCAmelCase = '''facebook/regnet-y-040'''
__UpperCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
__UpperCAmelCase = '''facebook/regnet-y-040'''
__UpperCAmelCase = '''tabby, tabby cat'''
__UpperCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 3 , lowerCamelCase_ = 1 , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" , ) -> int:
super().__init__()
lowerCAmelCase__ = nn.Conv2d(
lowerCamelCase_ , lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=kernel_size // 2 , groups=lowerCamelCase_ , bias=lowerCamelCase_ , )
lowerCAmelCase__ = nn.BatchNorm2d(lowerCamelCase_ )
lowerCAmelCase__ = ACT2FN[activation] if activation is not None else nn.Identity()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = self.convolution(lowerCamelCase_ )
lowerCAmelCase__ = self.normalization(lowerCamelCase_ )
lowerCAmelCase__ = self.activation(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[Any]:
super().__init__()
lowerCAmelCase__ = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowerCAmelCase__ = config.num_channels
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowerCAmelCase__ = self.embedder(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 ) -> Any:
super().__init__()
lowerCAmelCase__ = nn.Conv2d(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , stride=lowerCamelCase_ , bias=lowerCamelCase_ )
lowerCAmelCase__ = nn.BatchNorm2d(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tensor:
lowerCAmelCase__ = self.convolution(lowerCamelCase_ )
lowerCAmelCase__ = self.normalization(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = nn.AdaptiveAvgPool2d((1, 1) )
lowerCAmelCase__ = nn.Sequential(
nn.Conv2d(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.Sigmoid() , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
# b c h w -> b c 1 1
lowerCAmelCase__ = self.pooler(lowerCamelCase_ )
lowerCAmelCase__ = self.attention(lowerCamelCase_ )
lowerCAmelCase__ = hidden_state * attention
return hidden_state
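The layer above is a squeeze-and-excitation gate: global-average-pool to one value per channel, compress and re-expand through two 1x1 convolutions, then rescale the input channels. A minimal standalone sketch on a random tensor (shapes are illustrative):

import torch
from torch import nn

pooler = nn.AdaptiveAvgPool2d((1, 1))
attention = nn.Sequential(
    nn.Conv2d(64, 16, kernel_size=1),  # squeeze: 64 -> 16 channels
    nn.ReLU(),
    nn.Conv2d(16, 64, kernel_size=1),  # excite: back to 64 channels
    nn.Sigmoid(),
)
x = torch.randn(2, 64, 8, 8)
scale = attention(pooler(x))  # (2, 64, 1, 1) per-channel gates in [0, 1]
out = x * scale               # channels re-weighted, spatial shape unchanged
print(out.shape)              # torch.Size([2, 64, 8, 8])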
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
lowerCAmelCase__ = ACT2FN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = hidden_state
lowerCAmelCase__ = self.layer(lowerCamelCase_ )
lowerCAmelCase__ = self.shortcut(lowerCamelCase_ )
hidden_state += residual
lowerCAmelCase__ = self.activation(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
lowerCAmelCase__ = ACT2FN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = hidden_state
lowerCAmelCase__ = self.layer(lowerCamelCase_ )
lowerCAmelCase__ = self.shortcut(lowerCamelCase_ )
hidden_state += residual
lowerCAmelCase__ = self.activation(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 , lowerCamelCase_ = 2 , ) -> Dict:
super().__init__()
lowerCAmelCase__ = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowerCAmelCase__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , ) , *[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for _ in range(depth - 1 )] , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = self.layers(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> BaseModelOutputWithNoAttention:
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(lowerCamelCase_ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : List[Any] = RegNetConfig
lowercase__ : Tuple = "regnet"
lowercase__ : List[str] = "pixel_values"
lowercase__ : Tuple = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
if isinstance(lowerCamelCase_ , nn.Conv2d ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCamelCase_ , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False ) -> int:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = value
__UpperCAmelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__UpperCAmelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , a__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[int]:
super().__init__(lowerCamelCase_ )
lowerCAmelCase__ = config
lowerCAmelCase__ = RegNetEmbeddings(lowerCamelCase_ )
lowerCAmelCase__ = RegNetEncoder(lowerCamelCase_ )
lowerCAmelCase__ = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(lowerCamelCase_ )
lowerCAmelCase__ = self.encoder(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(lowerCamelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[Any]:
super().__init__(lowerCamelCase_ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = RegNetModel(lowerCamelCase_ )
# classification head
lowerCAmelCase__ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier(lowerCamelCase_ )
lowerCAmelCase__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase__ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase__ = '''single_label_classification'''
else:
lowerCAmelCase__ = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCAmelCase__ = MSELoss()
if self.num_labels == 1:
lowerCAmelCase__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase__ = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase__ = CrossEntropyLoss()
lowerCAmelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase__ = BCEWithLogitsLoss()
lowerCAmelCase__ = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states )
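The classification head above infers config.problem_type from the label dtype and shape, then dispatches to MSE, cross-entropy, or BCE-with-logits. A minimal sketch of the two classification branches on dummy tensors (shapes and values are illustrative):

import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss

logits = torch.randn(4, 3)                       # batch of 4, num_labels = 3
int_labels = torch.tensor([0, 2, 1, 2])          # long dtype -> "single_label_classification"
multi_hot = torch.randint(0, 2, (4, 3)).float()  # float multi-hot -> "multi_label_classification"

print(CrossEntropyLoss()(logits.view(-1, 3), int_labels.view(-1)))
print(BCEWithLogitsLoss()(logits, multi_hot))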
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
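A short usage sketch of the linked-list stack above:

stack = Stack[int]()
for value in (1, 2, 3):
    stack.push(value)
print(stack)         # 3->2->1 (top of the stack first)
print(stack.peek())  # 3
print(stack.pop())   # 3
print(len(stack))    # 2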
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
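map_nested itself can be exercised without the joblibspark backend; it applies the function to every leaf of a nested structure. A minimal sketch:

from datasets.utils.py_utils import map_nested

def add_one(i):
    return i + 1

print(map_nested(add_one, {"a": [1, 2], "b": 3}))  # {'a': [2, 3], 'b': 4}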
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1_000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
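A quick sanity check of the counting logic; perimeter 120 is the classic small case with three distinct right triangles:

triplets = pythagorean_triple(120)
print(triplets[120])  # 3, from (20, 48, 52), (24, 45, 51) and (30, 40, 50)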
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
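A minimal usage sketch of the tokenizer above (assuming network access to the EleutherAI/gpt-neox-20b tokenizer files):

from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
print(ids)
print(tokenizer.decode(ids))  # "Hello world"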
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
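A quick usage sketch: pass exactly one of the three quantities as 0 and the function solves for it.

print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
print(ohms_law(voltage=0, current=2, resistance=5))   # {'voltage': 10.0}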
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
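The retrieve() round-trip depends on slicing the main worker's results into one per-rank chunk before scattering; a single-process sketch of that reshaping, roughly what the inherited chunking helper produces (no process group involved):

import torch

world_size, n_queries, n_docs = 2, 3, 4
all_ids = torch.arange(world_size * n_queries * n_docs).reshape(world_size * n_queries, n_docs)
# one (n_queries, n_docs) slice per rank
scatter_list = list(torch.chunk(all_ids, world_size, dim=0))
print([t.shape for t in scatter_list])  # [torch.Size([3, 4]), torch.Size([3, 4])]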
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
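The _LazyModule pattern above defers heavy imports until attribute access; a minimal standalone sketch of the same idea (not the actual _LazyModule implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the module that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only runs when normal lookup fails, i.e. on first access
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)


lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # json is imported only on this first access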
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"
def __init__( self , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=3_8_4 , __lowerCAmelCase=1_6 , __lowerCAmelCase=3 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=[2, 5, 8, 1_1] , __lowerCAmelCase="project" , __lowerCAmelCase=[4, 2, 1, 0.5] , __lowerCAmelCase=[9_6, 1_9_2, 3_8_4, 7_6_8] , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=-1 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.4 , __lowerCAmelCase=2_5_5 , __lowerCAmelCase=0.1 , __lowerCAmelCase=[1, 1_0_2_4, 2_4, 2_4] , __lowerCAmelCase=[0, 1] , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
__magic_name__ :str = hidden_size
__magic_name__ :Union[str, Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
__magic_name__ :Tuple = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
__magic_name__ :List[Any] = BitConfig(**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
__magic_name__ :Tuple = BitConfig(**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Dict = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
__magic_name__ :Any = backbone_featmap_shape
__magic_name__ :Any = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
__magic_name__ :Dict = None
__magic_name__ :Dict = None
__magic_name__ :List[Any] = []
__magic_name__ :int = num_hidden_layers
__magic_name__ :Union[str, Any] = num_attention_heads
__magic_name__ :Tuple = intermediate_size
__magic_name__ :List[Any] = hidden_act
__magic_name__ :int = hidden_dropout_prob
__magic_name__ :Dict = attention_probs_dropout_prob
__magic_name__ :int = initializer_range
__magic_name__ :Tuple = layer_norm_eps
__magic_name__ :List[Any] = image_size
__magic_name__ :str = patch_size
__magic_name__ :str = num_channels
__magic_name__ :Union[str, Any] = qkv_bias
__magic_name__ :Any = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
__magic_name__ :Tuple = readout_type
__magic_name__ :List[Any] = reassemble_factors
__magic_name__ :Any = neck_hidden_sizes
__magic_name__ :int = fusion_hidden_size
__magic_name__ :str = head_in_index
__magic_name__ :Any = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
__magic_name__ :Dict = use_auxiliary_head
__magic_name__ :Dict = auxiliary_loss_weight
__magic_name__ :Optional[int] = semantic_loss_ignore_index
__magic_name__ :Any = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __SCREAMING_SNAKE_CASE (__A ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
super().__init__(
UpperCamelCase__ , split=UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , streaming=UpperCamelCase__ , num_proc=UpperCamelCase__ , **UpperCamelCase__ , )
a_ = field
a_ = path_or_paths if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else {self.split: path_or_paths}
a_ = Json(
cache_dir=UpperCamelCase__ , data_files=UpperCamelCase__ , features=UpperCamelCase__ , field=UpperCamelCase__ , **UpperCamelCase__ , )
def _a ( self ):
"""simple docstring"""
if self.streaming:
a_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
a_ = None
a_ = None
a_ = None
a_ = None
self.builder.download_and_prepare(
download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , num_proc=self.num_proc , )
a_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
a_ = dataset
a_ = path_or_buf
a_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a_ = num_proc
a_ = 'utf-8'
a_ = to_json_kwargs
def _a ( self ):
"""simple docstring"""
a_ = self.to_json_kwargs.pop('path_or_buf' , UpperCamelCase__ )
a_ = self.to_json_kwargs.pop('orient' , 'records' )
a_ = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
a_ = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
a_ = self.to_json_kwargs.pop('compression' , UpperCamelCase__ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=UpperCamelCase__ ) as buffer:
a_ = self._write(file_obj=UpperCamelCase__ , orient=UpperCamelCase__ , lines=UpperCamelCase__ , index=UpperCamelCase__ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
' was passed. Please provide a local path instead.' )
a_ = self._write(
file_obj=self.path_or_buf , orient=UpperCamelCase__ , lines=UpperCamelCase__ , index=UpperCamelCase__ , **self.to_json_kwargs )
return written
def _a ( self , UpperCamelCase__ ):
"""simple docstring"""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ):
"""simple docstring"""
        written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
else:
            num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase__ , UpperCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                    written += file_obj.write(json_str)
return written
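In user code this writer sits behind Dataset.to_json; a minimal usage sketch:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
ds.to_json("out.jsonl")  # JSON Lines by default: orient="records", lines=True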
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
snake_case : Any =tempfile.mkdtemp()
snake_case : Optional[Any] =BlipImageProcessor()
snake_case : Union[str, Any] =GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
snake_case : Union[str, Any] =BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
snake_case : Optional[int] =InstructBlipProcessor(_snake_case, _snake_case, _snake_case )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : Any, **_snake_case : Dict ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname, **_snake_case ).tokenizer
def __snake_case ( self : Union[str, Any], **_snake_case : Any ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname, **_snake_case ).image_processor
def __snake_case ( self : str, **_snake_case : Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname, **_snake_case ).qformer_tokenizer
def __snake_case ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Any ):
'''simple docstring'''
snake_case : List[Any] =[np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
snake_case : Optional[int] =[Image.fromarray(np.moveaxis(_snake_case, 0, -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : int ):
'''simple docstring'''
snake_case : List[Any] =InstructBlipProcessor(
tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
processor.save_pretrained(self.tmpdirname )
snake_case : Optional[Any] =self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
snake_case : Any =self.get_image_processor(do_normalize=_snake_case, padding_value=1.0 )
snake_case : Union[str, Any] =InstructBlipProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=_snake_case, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, _snake_case )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, _snake_case )
self.assertIsInstance(processor.qformer_tokenizer, _snake_case )
def __snake_case ( self : Tuple ):
'''simple docstring'''
snake_case : Tuple =self.get_image_processor()
snake_case : List[str] =self.get_tokenizer()
snake_case : Tuple =self.get_qformer_tokenizer()
snake_case : Any =InstructBlipProcessor(
tokenizer=_snake_case, image_processor=_snake_case, qformer_tokenizer=_snake_case )
snake_case : str =self.prepare_image_inputs()
snake_case : Union[str, Any] =image_processor(_snake_case, return_tensors='''np''' )
snake_case : Dict =processor(images=_snake_case, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
def __snake_case ( self : Dict ):
'''simple docstring'''
snake_case : Tuple =self.get_image_processor()
snake_case : List[Any] =self.get_tokenizer()
snake_case : Optional[Any] =self.get_qformer_tokenizer()
snake_case : Tuple =InstructBlipProcessor(
tokenizer=_snake_case, image_processor=_snake_case, qformer_tokenizer=_snake_case )
snake_case : List[Any] ='''lower newer'''
snake_case : List[Any] =processor(text=_snake_case )
snake_case : int =tokenizer(_snake_case, return_token_type_ids=_snake_case )
snake_case : Union[str, Any] =qformer_tokenizer(_snake_case, return_token_type_ids=_snake_case )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key], encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['''qformer_''' + key] )
def __snake_case ( self : Tuple ):
'''simple docstring'''
snake_case : int =self.get_image_processor()
snake_case : Tuple =self.get_tokenizer()
snake_case : List[Any] =self.get_qformer_tokenizer()
snake_case : Optional[Any] =InstructBlipProcessor(
tokenizer=_snake_case, image_processor=_snake_case, qformer_tokenizer=_snake_case )
snake_case : int ='''lower newer'''
snake_case : Optional[int] =self.prepare_image_inputs()
snake_case : int =processor(text=_snake_case, images=_snake_case )
self.assertListEqual(
list(inputs.keys() ), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : Union[str, Any] =self.get_image_processor()
snake_case : Any =self.get_tokenizer()
snake_case : List[Any] =self.get_qformer_tokenizer()
snake_case : Any =InstructBlipProcessor(
tokenizer=_snake_case, image_processor=_snake_case, qformer_tokenizer=_snake_case )
snake_case : Optional[int] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : Optional[int] =processor.batch_decode(_snake_case )
snake_case : Optional[int] =tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case, _snake_case )
def __snake_case ( self : str ):
'''simple docstring'''
snake_case : Tuple =self.get_image_processor()
snake_case : str =self.get_tokenizer()
snake_case : str =self.get_qformer_tokenizer()
snake_case : Optional[Any] =InstructBlipProcessor(
tokenizer=_snake_case, image_processor=_snake_case, qformer_tokenizer=_snake_case )
snake_case : Union[str, Any] ='''lower newer'''
snake_case : Optional[int] =self.prepare_image_inputs()
snake_case : Optional[Any] =processor(text=_snake_case, images=_snake_case )
self.assertListEqual(
list(inputs.keys() ), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime( arrival_time , burst_time , no_of_processes ):
waiting_time = [0] * no_of_processes
remaining_time = [0] * no_of_processes
# Initialize remaining_time to burst_time.
for i in range(no_of_processes ):
remaining_time[i] = burst_time[i]
ready_process: list[int] = []
completed = 0
total_time = 0
# While processes remain uncompleted, every process whose arrival time has
# passed and which still has remaining execution time is put into
# ready_process; the shortest job in ready_process, target_process, is then
# run to completion (non-preemptive shortest-job-first).
while completed != no_of_processes:
ready_process = []
target_process = -1
for i in range(no_of_processes ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(i )
if len(ready_process ) > 0:
target_process = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
target_process = i
total_time += burst_time[target_process]
completed += 1
remaining_time[target_process] = 0
waiting_time[target_process] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
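# Turnaround time = waiting time + burst time (equivalently, completion time - arrival time).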
def calculate_turnaroundtime( burst_time , no_of_processes , waiting_time ):
turn_around_time = [0] * no_of_processes
for i in range(no_of_processes ):
turn_around_time[i] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
no_of_processes = 4
burst_time = [2, 5, 3, 7]
arrival_time = [0, 0, 0, 0]
waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
turn_around_time = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
print(f"Average turnaround time = {mean(turn_around_time):.5f}")
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def get_test_pipeline( self , model , tokenizer , processor ) -> Optional[Any]:
"""simple docstring"""
generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
return generator, ["Something to write", "Something else"]
def run_pipeline_test( self , generator , _ ) -> Union[str, Any]:
"""simple docstring"""
outputs = generator("""Something there""" )
self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
outputs = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=True )
self.assertEqual(
outputs , [
[{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
[{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
] , )
outputs = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=True )
self.assertEqual(
outputs , [
[{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
[{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
] , )
with self.assertRaises(ValueError ):
generator(4 )
@require_torch
def test_small_model_pt( self ) -> List[Any]:
"""simple docstring"""
generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
outputs = generator("""Something there""" , do_sample=False )
self.assertEqual(outputs , [{"""generated_text""": """"""}] )
num_return_sequences = 3
outputs = generator(
"""Something there""" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
target_outputs = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(outputs , target_outputs )
outputs = generator("""This is a test""" , do_sample=True , num_return_sequences=2 , return_tensors=True )
self.assertEqual(
outputs , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
generator.tokenizer.pad_token = "<pad>"
outputs = generator(
["""This is a test""", """This is a second test"""] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
self.assertEqual(
outputs , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def test_small_model_tf( self ) -> List[Any]:
"""simple docstring"""
generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
outputs = generator("""Something there""" , do_sample=False )
self.assertEqual(outputs , [{"""generated_text""": """"""}] )
|
'''simple docstring'''
import json
import sys
def format_json_to_md( input_json_file , output_md_file ) -> None:
"""simple docstring"""
with open(input_json_file , encoding="utf-8" ) as f:
results = json.load(f )
output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(results ):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split("/" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
title = "| metric |"
lines = "|--------|"
value = "| new / old (diff) |"
for metric_name in sorted(benchmark_res ):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals["new"]
old_val = metric_vals.get("old" , None )
dif_val = metric_vals.get("diff" , None )
val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float) ) else "None"
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(output_md_file , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(output_md ) )
if __name__ == "__main__":
input_json_file = sys.argv[1]
output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class _lowercase ( BaseImageProcessor ):
model_input_names = ["""pixel_values"""]
def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 2_5_5 , do_center_crop = True , crop_size = None , do_flip_channel_order = True , **kwargs , ):
super().__init__(**kwargs )
size = size if size is not None else {"""shortest_edge""": 2_2_4}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_flip_channel_order = do_flip_channel_order
def resize( self , image , size , resample = PIL.Image.BILINEAR , data_format = None , **kwargs , ):
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image , size , data_format = None , **kwargs , ):
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
def rescale( self , image , scale , data_format = None , **kwargs , ):
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def flip_channel_order( self , image , data_format = None ):
return flip_channel_order(image , data_format=data_format )
def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_center_crop = None , crop_size = None , do_flip_channel_order = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_flip_channel_order = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
size = size if size is not None else self.size
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
images = [self.flip_channel_order(image=image ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {"""pixel_values""": images}
return BatchFeature(data=data , tensor_type=return_tensors )
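# Converts raw segmentation logits into one semantic map per image (a class id per
# pixel), optionally resizing each map to the corresponding entry of `target_sizes`.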
def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits ) != len(target_sizes ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(target_sizes ):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits ) ):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
semantic_map = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(semantic_map )
else:
semantic_segmentation = logits.argmax(dim=1 )
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 40_96,
"""google/bigbird-roberta-large""": 40_96,
"""google/bigbird-base-trivia-itc""": 40_96,
}
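# BigBird accepts sequences of up to 4_096 tokens, hence the positional embedding sizes above.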
class _lowercase ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
prefix_tokens: List[int] = []
def __init__( self , a , a="<unk>" , a="<s>" , a="</s>" , a="<pad>" , a="[SEP]" , a="[MASK]" , a="[CLS]" , a = None , **a , ):
snake_case__ : str =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
snake_case__ : Optional[int] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
snake_case__ : Any =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
snake_case__ : List[str] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
snake_case__ : int =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
snake_case__ : Tuple =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : Union[str, Any] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
snake_case__ : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , sep_token=a , mask_token=a , cls_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
snake_case__ : Optional[int] =vocab_file
snake_case__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
@property
def vocab_size( self ):
return self.sp_model.get_piece_size()
def get_vocab( self ):
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
state = self.__dict__.copy()
state["""sp_model"""] = None
return state
def __setstate__( self , d ):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _tokenize( self , text ):
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token ):
return self.sp_model.piece_to_id(token )
def _convert_id_to_token( self , index ):
token = self.sp_model.IdToPiece(index )
return token
def convert_tokens_to_string( self , tokens ):
current_sub_tokens = []
out_string = """"""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def _decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ):
self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""" , False )
filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
current_sub_text = []
sub_texts.append(token )
else:
current_sub_text.append(token )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
text = re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(sub_texts ) )
else:
text = """""".join(sub_texts )
clean_up_tokenization_spaces = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text )
return clean_text
else:
return text
def save_vocabulary( self , save_directory , filename_prefix = None ):
if not os.path.isdir(save_directory ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , """wb""" ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
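# A BigBird sequence has the format `[CLS] X [SEP]` for a single sequence and
# `[CLS] A [SEP] B [SEP]` for a pair, which the three methods below implement.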
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
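# Fix the RNG seed so the output-comparison checks in the conversion below are deterministic.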
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
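# Remote checkpoint locations for each Bark sub-model; the "_small" variants are reduced-size checkpoints.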
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path( model_type , use_small=False) -> str:
key = model_type
if use_small:
key += "_small"
return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'])
def _download( from_hf_hub , file_name) -> None:
os.makedirs(CACHE_DIR , exist_ok=True)
hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR)
def _load_model( ckpt_path , device , use_small=False , model_type="text") -> List[str]:
if model_type == "text":
ModelClass = BarkSemanticModel
ConfigClass = BarkSemanticConfig
GenerationConfigClass = BarkSemanticGenerationConfig
elif model_type == "coarse":
ModelClass = BarkCoarseModel
ConfigClass = BarkCoarseConfig
GenerationConfigClass = BarkCoarseGenerationConfig
elif model_type == "fine":
ModelClass = BarkFineModel
ConfigClass = BarkFineConfig
GenerationConfigClass = BarkFineGenerationConfig
else:
raise NotImplementedError()
model_key = f'{model_type}_small' if use_small else model_type
model_info = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(ckpt_path):
logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.')
_download(model_info['repo_id'] , model_info['file_name'])
checkpoint = torch.load(ckpt_path , map_location=device)
# this is a hack: Bark checkpoints carry a single `vocab_size`, while the HF
# config distinguishes input and output vocabulary sizes.
model_args = checkpoint['model_args']
if "input_vocab_size" not in model_args:
model_args['input_vocab_size'] = model_args['vocab_size']
model_args['output_vocab_size'] = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
model_args['num_heads'] = model_args.pop('n_head')
model_args['hidden_size'] = model_args.pop('n_embd')
model_args['num_layers'] = model_args.pop('n_layer')
model_config = ConfigClass(**checkpoint['model_args'])
model = ModelClass(config=model_config)
model_generation_config = GenerationConfigClass()
model.generation_config = model_generation_config
state_dict = checkpoint['model']
# fixup checkpoint
unwanted_prefix = '_orig_mod.'
for k, v in list(state_dict.items()):
if k.startswith(unwanted_prefix):
# replace part of the key with corresponding layer name in HF implementation
new_k = k[len(unwanted_prefix) :]
for old_layer_name in new_layer_name_dict:
new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name])
state_dict[new_k] = state_dict.pop(k)
extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')}
missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')}
if len(extra_keys) != 0:
raise ValueError(f'extra keys found: {extra_keys}')
if len(missing_keys) != 0:
raise ValueError(f'missing keys: {missing_keys}')
model.load_state_dict(state_dict , strict=False)
n_params = model.num_parameters(exclude_embeddings=True)
val_loss = checkpoint['best_val_loss'].item()
logger.info(f'model loaded: {round(n_params/1e6 , 1)}M params, {round(val_loss , 3)} loss')
model.eval()
model.to(device)
del checkpoint, state_dict
return model
def load_model( pytorch_dump_folder_path , use_small=False , model_type="text") -> int:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
device = 'cpu' # do conversion on cpu
ckpt_path = _get_ckpt_path(model_type , use_small=use_small)
model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small)
# load bark initial model
bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small)
if model_type == "text":
bark_model = bark_model['model']
if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters')
# check if same output as the bark model
batch_size = 5
sequence_length = 10
if model_type in ["text", "coarse"]:
vec = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int)
output_old_model = bark_model(vec)[0]
output_new_model_total = model(vec)
# take last logits
output_new_model = output_new_model_total.logits[:, [-1], :]
else:
prediction_codebook_channel = 3
n_codes_total = 8
vec = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int)
output_new_model_total = model(prediction_codebook_channel , vec)
output_old_model = bark_model(prediction_codebook_channel , vec)
output_new_model = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape')
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal')
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ) -> Optional[Any]:
pytorch_dump_folder_path = os.path.join(folder_path , append_text)
semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json'))
coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json'))
fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json'))
codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz')
semantic = BarkSemanticModel.from_pretrained(semantic_path)
coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
fineAcoustic = BarkFineModel.from_pretrained(fine_path)
codec = EncodecModel.from_pretrained('facebook/encodec_24khz')
bark_config = BarkConfig.from_sub_model_configs(
semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig)
bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config)
bark = BarkModel(bark_config)
bark.semantic = semantic
bark.coarse_acoustics = coarseAcoustic
bark.fine_acoustics = fineAcoustic
bark.codec_model = codec
bark.generation_config = bark_generation_config
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
lowerCAmelCase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker :
module : nn.Module
traced : List[nn.Module] = field(default_factory=list )
handles : list = field(default_factory=list )
def _forward_hook( self , m , inputs : Tensor , outputs : Tensor):
has_not_submodules = len(list(m.modules())) == 1 or isinstance(m , nn.Conv2d) or isinstance(m , nn.BatchNorm2d)
if has_not_submodules:
self.traced.append(m)
def __call__( self , x : Tensor):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(x)
[x.remove() for x in self.handles]
return self
@property
def parametrized( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda x: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class ModuleTransfer :
src : nn.Module
dest : nn.Module
verbose : int = 1
src_skip : List = field(default_factory=list )
dest_skip : List = field(default_factory=list )
raise_if_mismatch : bool = True
def __call__( self , x : Tensor):
dest_traced = Tracker(self.dest)(x).parametrized
src_traced = Tracker(self.src)(x).parametrized
src_traced = list(filter(lambda m: type(m) not in self.src_skip , src_traced))
dest_traced = list(filter(lambda m: type(m) not in self.dest_skip , dest_traced))
if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
raise Exception(
F'Numbers of operations are different. Source module has {len(src_traced)} operations while'
F' destination module has {len(dest_traced)}.')
for dest_m, src_m in zip(dest_traced , src_traced):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(F'Transferred from={src_m} to={dest_m}')
class FakeRegNetVisslWrapper (nn.Module ):
def __init__( self , model : nn.Module):
super().__init__()
feature_blocks : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem))
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block'), F'Unexpected layer name {k}'
block_index = len(feature_blocks) + 1
feature_blocks.append((F'res{block_index}', v))
self._feature_blocks = nn.ModuleDict(feature_blocks)
def forward( self , x : Tensor):
return get_trunk_forward_outputs(
x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap (dict ):
def convert_name_to_timm( self , x : str):
x_split = x.split('-')
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
def __getitem__( self , x : str):
# default to timm! e.g. "regnet-y-032" maps to timm's "regnety_032"
if x not in self:
x = self.convert_name_to_timm(x)
val = partial(lambda: (timm.create_model(x , pretrained=True).eval(), None))
else:
val = super().__getitem__(x)
return val
class NameToOurModelFuncMap (dict ):
def __getitem__( self , x : str):
if "seer" in x and "in1k" not in x:
val = RegNetModel
else:
val = RegNetForImageClassification
return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys ) -> Union[str, Any]:
for from_key, to_key in keys:
to_state_dict[to_key] = from_state_dict[from_key].clone()
print(f'Copied key={from_key} to={to_key}')
return to_state_dict
def convert_weight_and_push( name , from_model_func , our_model_func , config , save_directory , push_to_hub = True , ) -> List[Any]:
print(f'Converting {name}...')
with torch.no_grad():
from_model, from_state_dict = from_model_func()
our_model = our_model_func(config).eval()
module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False)
x = torch.randn((1, 3, 224, 224))
module_transfer(x)
if from_state_dict is not None:
keys = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys)
our_model.load_state_dict(to_state_dict)
our_outputs = our_model(x , output_hidden_states=True)
our_output = (
our_outputs.logits if isinstance(our_model , RegNetForImageClassification) else our_outputs.last_hidden_state
)
from_output = from_model(x)
from_output = from_output[-1] if type(from_output) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
our_output = our_outputs.hidden_states[-1]
assert torch.allclose(from_output , our_output), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=True , )
size = 224 if 'seer' not in name else 384
# we can use the convnext image processor, since RegNet shares its preprocessing
image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=size)
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=True , )
print(f'Pushed {name}')
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True) -> int:
filename = 'imagenet-1k-id2label.json'
num_labels = 1_000
expected_shape = (1, num_labels)
repo_id = 'huggingface/label-files'
id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset')) , 'r'))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id)
names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x'),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x'),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x'),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x'),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x'),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='x'),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='x'),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='x'),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='x'),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='x'),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='x'),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='x'),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
}
names_to_ours_model_map = NameToOurModelFuncMap()
names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision( checkpoint_url , model_func ) -> Tuple[nn.Module, Dict]:
files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory) , map_location='cpu')
model = model_func()
# check if we have a head, if yes add it
model_state_dict = files['classy_state_dict']['base_model']['model']
state_dict = model_state_dict['trunk']
model.load_state_dict(state_dict)
return model.eval(), model_state_dict["heads"]
# pretrained
names_to_from_model_map['regnet-y-320-seer'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf()) , )
names_to_from_model_map['regnet-y-640-seer'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf()) , )
names_to_from_model_map['regnet-y-1280-seer'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf()) , )
names_to_from_model_map['regnet-y-10b-seer'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52))) , )
# IN1K finetuned
names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf()) , )
names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf()) , )
names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf()) , )
names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52))) , )
if model_name:
convert_weight_and_push(
model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
class ModelToSave ( tf.Module ):
'''simple docstring'''
def __init__( self , tokenizer ):
'''simple docstring'''
super().__init__()
self.tokenizer = tokenizer
config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
self.model = TFGPTaLMHeadModel.from_config(config )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def serving( self , text ):
'''simple docstring'''
tokenized = self.tokenizer(text )
input_ids_dense = tokenized["""input_ids"""].to_tensor()
input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def setUp( self ):
'''simple docstring'''
super().setUp()
self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
self.test_sentences = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def test_tokenization( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
python_outputs = tokenizer([test_inputs] , return_tensors="""tf""" )
tf_outputs = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
python_outputs_values = python_outputs[key].numpy()
tf_outputs_values = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
def test_graph_mode( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
compiled_tokenizer = tf.function(tf_tokenizer )
for test_inputs in self.test_sentences:
test_inputs = tf.constant(test_inputs )
compiled_outputs = compiled_tokenizer(test_inputs )
eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def test_saved_model( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
model = ModelToSave(tokenizer=tf_tokenizer )
test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
out = model.serving(test_inputs ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
save_path = Path(tempdir ) / """saved.model"""
tf.saved_model.save(model , save_path , signatures={"""serving_default""": model.serving} )
loaded_model = tf.saved_model.load(save_path )
loaded_output = loaded_model.signatures["""serving_default"""](test_inputs )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def test_from_config( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
out = tf_tokenizer(test_inputs ) # Build model with some sample inputs
config = tf_tokenizer.get_config()
model_from_config = TFGPTaTokenizer.from_config(config )
from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def test_padding( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
tf_tokenizer.pad_token_id = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
out = tf_tokenizer(test_inputs , max_length=max_length )
out_length = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
|
import qiskit
def half_adder( bit0 : int , bit1 : int ):
"""simple docstring"""
simulator = qiskit.Aer.get_backend("""aer_simulator""" )
qc_ha = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bit0 == 1:
qc_ha.x(0 )
if bit1 == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
job = qiskit.execute(qc_ha , simulator , shots=1_000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(qc_ha )
if __name__ == "__main__":
counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
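# Sanity check: a half adder computes sum = A XOR B (classical bit 0) and
# carry = A AND B (classical bit 1), so half_adder(1, 1) should report the
# bitstring '10' (carry=1, sum=0) for all 1_000 shots.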
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""Salesforce/codegen-350M-mono""": 2_0_4_8,
}
class lowercase ( PreTrainedTokenizerFast ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = CodeGenTokenizer
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
"""simple docstring"""
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
if kwargs.pop("""add_bos_token""" , False ):
model_id = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currently GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
pre_tok_state["""add_prefix_space"""] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
def _batch_encode_plus( self , *args , **kwargs ):
"""simple docstring"""
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus( self , *args , **kwargs ):
"""simple docstring"""
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*args , **kwargs )
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
"""simple docstring"""
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        return completion[: min(terminals_pos)] if terminals_pos else completion
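# A hedged usage sketch (assumes the `transformers` package and access to the
# `Salesforce/codegen-350M-mono` checkpoint); `truncate_before_pattern` cuts the
# decoded completion at the first regex match:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer("def hello_world():", return_tensors="pt").input_ids
#     text = tokenizer.decode(ids[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])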
| 165
|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate to a value that is not even reached by the input.
                # In that case we don't want to truncate; there is no really better way to catch that exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
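# A hedged usage sketch (the checkpoint name is an assumption; any NLI model works):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     result = classifier(
#         "Who are you voting for in 2020?",
#         candidate_labels=["politics", "public health", "economics"],
#     )
#     print(result["labels"][0], result["scores"][0])  # highest-scoring label first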
| 165
| 1
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
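# Illustrative sketch (not part of the original file): the `attribute_map` above
# lets generic config names alias the XLNet-specific ones.
#
#     config = XLNetConfig(n_layer=12, n_head=8)
#     assert config.num_hidden_layers == 12  # resolved to `n_layer` via attribute_map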
| 47
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable to map the decorated functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
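# Illustrative decorator usage (a sketch; `fib` is an example function, not part
# of the original module):
#
#     @LRUCache.decorator(100)
#     def fib(num: int) -> int:
#         if num in (1, 2):
#             return 1
#         return fib(num - 1) + fib(num - 2)
#
#     print(fib(30))           # 832040, memoized per distinct argument
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)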
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 47
| 1
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(wordfile_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
| 412
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        elif abs(abs(i) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 4
| 0
|
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 707
|
'''simple docstring'''
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
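# Illustrative check (not in the original file): the classic textbook pair —
# turning "intention" into "execution" takes exactly 5 edits.
assert min_distance_up_bottom("intention", "execution") == 5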
| 575
| 0
|
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 28
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 440
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 695
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
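# Illustrative call (not in the original file): character trigrams of a short string.
assert create_ngram("ngram", 3) == ["ngr", "gra", "ram"]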
| 695
| 1
|
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
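# Quick illustrative checks (not in the original file): 6 = 1+2+3 and
# 28 = 1+2+4+7+14 are perfect; 27 is not.
assert perfect(6) and perfect(28) and not perfect(27)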
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 539
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 539
| 1
|
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 707
|
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    # A matrix is Hermitian iff it equals its conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
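# Illustrative check (not part of the original file): with the identity matrix,
# the Rayleigh quotient of any non-zero vector is exactly 1.
assert rayleigh_quotient(np.eye(3), np.array([[1.0], [2.0], [3.0]])) == 1.0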
| 668
| 0
|
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
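# Illustrative conversion (not in the original file): 4 cubic metres in litres.
assert volume_conversion(4, "cubicmeter", "litre") == 4000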
| 277
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
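# Illustrative sketch (not part of the original file): a hybrid DPT config falls
# back to a default `BiT` backbone when none is supplied.
#
#     config = DPTConfig(is_hybrid=True)
#     assert config.backbone_config is not None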
| 277
| 1
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
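# Hedged torch.hub usage sketch (the hub repo id and checkpoint name are assumptions):
#
#     import torch
#
#     tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     model = torch.hub.load("huggingface/transformers", "modelForMaskedLM", "bert-base-uncased")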
| 364
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 364
| 1
|
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if the from_vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    #   0 -> 1 -> 2
    #   1 -> 2
    #   2 -> 0 -> 3
    #   3 -> 3
    #  DFS:
    #   0 1 2 3
| 281
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
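

# Illustrative sketch (added; not part of the original test suite): the same
# reset/measure pattern packaged as a reusable context manager. The name
# `cuda_memory_budget` and its `limit_bytes` parameter are assumptions made
# here for demonstration only.
from contextlib import contextmanager


@contextmanager
def cuda_memory_budget(limit_bytes):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    yield
    peak = torch.cuda.max_memory_allocated()
    assert peak < limit_bytes, f"peak CUDA memory {peak} exceeded budget {limit_bytes}"


# Usage: `with cuda_memory_budget(13 * 10**9): pipe_1(...)`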
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
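
    # Illustrative sketch (added): `debug_launcher` runs a callable under a
    # multi-process CPU launcher, so distributed code paths can be exercised
    # in plain unit tests without GPUs. `num_processes=2` is an assumption
    # about the desired world size, not something this file sets.
    #
    #   def _demo():
    #       print("running inside a launched process")
    #
    #   debug_launcher(_demo, num_processes=2)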
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function elementwise to the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Force an odd kernel size: even k becomes k + 1, odd k stays unchanged.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = parse_args(sys.argv)
_lowerCAmelCase : str = cva.imread(filename, 0)
cva.imshow('input image', img)
_lowerCAmelCase : Union[str, Any] = img / 255
_lowerCAmelCase : List[str] = out.astype('float32')
_lowerCAmelCase : Optional[int] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_lowerCAmelCase : Union[str, Any] = out * 255
_lowerCAmelCase : Optional[Any] = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
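
    # Illustrative self-check (added; not in the original script): filter a
    # small synthetic grayscale image and confirm the output keeps its shape.
    noisy = np.random.rand(32, 32).astype("float32")
    smoothed = bilateral_filter(noisy, 1.0, 1.0, 5)
    assert smoothed.shape == noisy.shape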
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()
    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False):
__lowercase = 4
__lowercase = 3_2
__lowercase = (3_2, 3_2)
__lowercase = torch.manual_seed(0 )
__lowercase = torch.device(lowercase__ )
__lowercase = (batch_size, num_channels) + sizes
__lowercase = randn_tensor(lowercase__ ,generator=lowercase__ ,device=lowercase__ )
__lowercase = {'''hidden_states''': hidden_states}
if include_temb:
__lowercase = 1_2_8
__lowercase = randn_tensor((batch_size, temb_channels) ,generator=lowercase__ ,device=lowercase__ )
if include_res_hidden_states_tuple:
__lowercase = torch.manual_seed(1 )
__lowercase = (randn_tensor(lowercase__ ,generator=lowercase__ ,device=lowercase__ ),)
if include_encoder_hidden_states:
__lowercase = floats_tensor((batch_size, 3_2, 3_2) ).to(lowercase__ )
if include_skip_sample:
__lowercase = randn_tensor(((batch_size, 3) + sizes) ,generator=lowercase__ ,device=lowercase__ )
return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
__lowercase = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
'''temb_channels''': 1_2_8,
}
if self.block_type == "up":
__lowercase = 3_2
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
__lowercase = self.dummy_input
return init_dict, inputs_dict
    def test_output(self, expected_slice):
__lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.block_class(**lowercase__ )
unet_block.to(lowercase__ )
unet_block.eval()
with torch.no_grad():
__lowercase = unet_block(**lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = output[0]
self.assertEqual(output.shape ,self.output_shape )
__lowercase = output[0, -1, -3:, -3:]
__lowercase = torch.tensor(lowercase__ ).to(lowercase__ )
assert torch_all_close(output_slice.flatten() ,lowercase__ ,atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' ,'''Training is not supported in mps''' )
    def test_training(self):
__lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.block_class(**lowercase__ )
model.to(lowercase__ )
model.train()
__lowercase = model(**lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = output[0]
__lowercase = torch.device(lowercase__ )
__lowercase = randn_tensor(output.shape ,device=lowercase__ )
__lowercase = torch.nn.functional.mse_loss(lowercase__ ,lowercase__ )
loss.backward()
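

# Sketch of how a tester mixin like this is typically consumed (added for
# illustration; the concrete block class and import path are assumptions
# about the surrounding test suite, not taken from this file):
#
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"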
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
_snake_case : Tuple = parser.parse_args()
_snake_case : Optional[Any] = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
_snake_case : Tuple = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
_snake_case : Any = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
_snake_case : List[Any] = reader.read()
_snake_case : List[Any] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
_snake_case : List[Any] = UNetaDModel(**config)
else:
_snake_case : int = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
_snake_case : Optional[Any] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_snake_case : Any = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_snake_case : Any = config[key]
del config[key]
_snake_case : List[str] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
_snake_case : Any = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
_snake_case : Optional[int] = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
_snake_case : Any = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
_snake_case : List[Any] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
_snake_case : Dict = param_value
_snake_case : Any = True
if not has_changed:
_snake_case : int = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
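
        # Worked example (added for illustration) of the renaming loop above:
        #   "downsample_blocks.0.conv.weight" -> "down_blocks.0.conv.weight"
        #   "mid.attn.weight"                 -> "mid_block.attn.weight"
        # Keys whose first component matches no entry are copied unchanged.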
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter operating on a stream of float samples."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
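

if __name__ == "__main__":
    # Usage sketch (added for illustration): the coefficients below are
    # arbitrary placeholder values, not a tuned filter design.
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.8, 0.81], [0.0025, 0.005, 0.0025])
    for sample in (0.0, 1.0, 1.0, 1.0):
        print(filt.process(sample))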
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCamelCase : Any=50_265 , lowerCamelCase : str=512 , lowerCamelCase : List[Any]=8 , lowerCamelCase : List[Any]=2_048 , lowerCamelCase : str=16 , lowerCamelCase : Any=8 , lowerCamelCase : Dict=2_048 , lowerCamelCase : int=16 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : Any=0.0 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Dict="gelu" , lowerCamelCase : List[str]=512 , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Tuple=0.0 , lowerCamelCase : str=0.0 , lowerCamelCase : List[str]=0.02 , lowerCamelCase : List[str]=1 , lowerCamelCase : List[str]=False , lowerCamelCase : List[str]=0 , lowerCamelCase : List[str]=1 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=2 , **lowerCamelCase : Tuple , )-> Tuple:
snake_case__ : List[str] = vocab_size
snake_case__ : Dict = max_position_embeddings
snake_case__ : Union[str, Any] = d_model
snake_case__ : Any = encoder_ffn_dim
snake_case__ : Dict = encoder_layers
snake_case__ : Optional[Any] = encoder_attention_heads
snake_case__ : Dict = decoder_ffn_dim
snake_case__ : Dict = decoder_layers
snake_case__ : Dict = decoder_attention_heads
snake_case__ : Tuple = dropout
snake_case__ : Union[str, Any] = attention_dropout
snake_case__ : Union[str, Any] = activation_dropout
snake_case__ : str = activation_function
snake_case__ : Optional[Any] = init_std
snake_case__ : Tuple = encoder_layerdrop
snake_case__ : Optional[Any] = decoder_layerdrop
snake_case__ : List[str] = use_cache
snake_case__ : List[Any] = encoder_layers
snake_case__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : Optional[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case__ : Any = {0: """batch"""}
snake_case__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
snake_case__ : Tuple = {0: """batch""", 1: """decoder_sequence"""}
snake_case__ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case__ , snake_case__ : List[Any] = self.num_layers
for i in range(lowerCamelCase ):
snake_case__ : Any = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case__ : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
snake_case__ : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : str = super().outputs
else:
snake_case__ : str = super(lowerCamelCase , self ).outputs
if self.use_past:
snake_case__ , snake_case__ : int = self.num_layers
for i in range(lowerCamelCase ):
snake_case__ : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, lowerCamelCase: PreTrainedTokenizer, lowerCamelCase: int = -1, lowerCamelCase: int = -1, lowerCamelCase: bool = False, lowerCamelCase: Optional[TensorType] = None) -> Mapping[str, Any]:
snake_case__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
snake_case__ : Any = seq_length if not self.use_past else 1
snake_case__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
snake_case__ : List[str] = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case__ , snake_case__ : int = common_inputs["""input_ids"""].shape
snake_case__ : Union[str, Any] = common_inputs["""decoder_input_ids"""].shape[1]
snake_case__ , snake_case__ : List[Any] = self.num_attention_heads
snake_case__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case__ : int = decoder_seq_length + 3
snake_case__ : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case__ : Optional[int] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
snake_case__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case__ , snake_case__ : Dict = self.num_layers
snake_case__ : Any = min(lowerCamelCase , lowerCamelCase )
snake_case__ : Tuple = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
snake_case__ : Optional[Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
snake_case__ : List[str] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, lowerCamelCase: PreTrainedTokenizer, lowerCamelCase: int = -1, lowerCamelCase: int = -1, lowerCamelCase: bool = False, lowerCamelCase: Optional[TensorType] = None) -> Mapping[str, Any]:
snake_case__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case__ , snake_case__ : List[str] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case__ : List[Any] = seqlen + 2
snake_case__ , snake_case__ : Union[str, Any] = self.num_layers
snake_case__ , snake_case__ : List[Any] = self.num_attention_heads
snake_case__ : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case__ : str = common_inputs["""attention_mask"""].dtype
snake_case__ : List[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
snake_case__ : Tuple = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, lowerCamelCase: PreTrainedTokenizer, lowerCamelCase: int = -1, lowerCamelCase: int = -1, lowerCamelCase: bool = False, lowerCamelCase: Optional[TensorType] = None) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case__ : List[Any] = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case__ : int = tokenizer.num_special_tokens_to_add(lowerCamelCase )
snake_case__ : List[Any] = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
snake_case__ : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case__ : str = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
    def generate_dummy_inputs(self, lowerCamelCase: PreTrainedTokenizer, lowerCamelCase: int = -1, lowerCamelCase: int = -1, lowerCamelCase: bool = False, lowerCamelCase: Optional[TensorType] = None) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
            snake_case__ : int = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
snake_case__ : int = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
snake_case__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
    def _flatten_past_key_values_(self, lowerCamelCase: Any, lowerCamelCase: Any, lowerCamelCase: List[str], lowerCamelCase: int):
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : Tuple = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
snake_case__ : List[str] = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
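

# Export sketch (added for illustration, not part of the module). Assuming the
# standard `transformers.onnx.export` helper, a checkpoint could be exported
# with the config above; the checkpoint id, opset, and output path below are
# illustrative choices:
#
#   from pathlib import Path
#   from transformers import AutoModel, AutoTokenizer
#   from transformers.onnx import export
#
#   model_id = "facebook/blenderbot_small-90M"
#   tokenizer = AutoTokenizer.from_pretrained(model_id)
#   model = AutoModel.from_pretrained(model_id)
#   onnx_config = BlenderbotSmallOnnxConfig(model.config, task="default")
#   export(tokenizer, model, onnx_config, opset=13, output=Path("model.onnx"))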
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
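

# Worked example (added for illustration): twelve layers spread across four
# devices end up three consecutive layers per device.
#
#   >>> get_device_map(12, [0, 1, 2, 3])
#   {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}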
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ) -> Any:
'''simple docstring'''
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = embeddings_size
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = depths
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_labels
UpperCamelCase__ = scope
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
def _a (self ) -> int:
'''simple docstring'''
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def _a (self ) -> List[str]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = TFRegNetModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFRegNetForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _A ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : str =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : Dict =(
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Dict =False
SCREAMING_SNAKE_CASE_ : Union[str, Any] =False
SCREAMING_SNAKE_CASE_ : int =False
SCREAMING_SNAKE_CASE_ : List[Any] =False
SCREAMING_SNAKE_CASE_ : Optional[Any] =False
def _a (self ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = TFRegNetModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _a (self ) -> int:
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _a (self ) -> Dict:
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _a (self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a (self ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _a (self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase__ = layer_type
UpperCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ):
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _a (self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = TFRegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __UpperCamelCase ( ):
UpperCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def _a (self ) -> Any:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' )
# forward pass
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, else 0 (logical OR)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data linearly into [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit (sample) standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
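

# Worked example (added for illustration):
#
#   >>> normalization([2, 4, 6])
#   [0.0, 0.5, 1.0]
#   >>> standardization([2, 4, 6])  # mean 4, sample standard deviation 2
#   [-1.0, 0.0, 1.0]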
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon keeps XLA-compiled softmax numerically aligned with the
    # non-compiled path.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    # `64512` is the upper bound, in bytes, of an HDF5 object header.
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
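

if __name__ == "__main__":
    # Quick demonstration (added for illustration): with a fully static shape,
    # shape_list simply returns the Python ints; unknown (None) dimensions
    # would come back as entries of tf.shape instead.
    x = tf.zeros((2, 3, 128))
    print(shape_list(x))  # [2, 3, 128]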
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowerCAmelCase = model_type_to_module_name(UpperCamelCase )
__lowerCAmelCase = importlib.import_module(F".{module_name}" , "transformers.models" )
try:
return getattr(UpperCamelCase , UpperCamelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(UpperCamelCase , "__name__" , UpperCamelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowerCAmelCase = importlib.import_module("transformers" )
if hasattr(UpperCamelCase , UpperCamelCase ):
return getattr(UpperCamelCase , UpperCamelCase )
return None
def _UpperCAmelCase ( UpperCamelCase: Union[str, os.PathLike] , UpperCamelCase: Optional[Union[str, os.PathLike]] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: Optional[Dict[str, str]] = None , UpperCamelCase: Optional[Union[bool, str]] = None , UpperCamelCase: Optional[str] = None , UpperCamelCase: bool = False , **UpperCamelCase: List[Any] , ):
"""simple docstring"""
__lowerCAmelCase = get_file_from_repo(
UpperCamelCase , UpperCamelCase , cache_dir=UpperCamelCase , force_download=UpperCamelCase , resume_download=UpperCamelCase , proxies=UpperCamelCase , use_auth_token=UpperCamelCase , revision=UpperCamelCase , local_files_only=UpperCamelCase , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(UpperCamelCase , encoding="utf-8" ) as reader:
return json.load(UpperCamelCase )
class AutoImageProcessor:
def __init__( self : Optional[Any] ):
"""simple docstring"""
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
"""simple docstring"""
__lowerCAmelCase = kwargs.pop("config" , snake_case__ )
__lowerCAmelCase = kwargs.pop("trust_remote_code" , snake_case__ )
__lowerCAmelCase = True
__lowerCAmelCase , __lowerCAmelCase = ImageProcessingMixin.get_image_processor_dict(snake_case__ , **snake_case__ )
__lowerCAmelCase = config_dict.get("image_processor_type" , snake_case__ )
__lowerCAmelCase = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
__lowerCAmelCase = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__lowerCAmelCase = config_dict.pop("feature_extractor_type" , snake_case__ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
__lowerCAmelCase = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
__lowerCAmelCase = config_dict["auto_map"]["AutoFeatureExtractor"]
__lowerCAmelCase = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(snake_case__ , snake_case__ ):
__lowerCAmelCase = AutoConfig.from_pretrained(snake_case__ , **snake_case__ )
# It could be in `config.image_processor_type``
__lowerCAmelCase = getattr(snake_case__ , "image_processor_type" , snake_case__ )
if hasattr(snake_case__ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
__lowerCAmelCase = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
__lowerCAmelCase = image_processor_class_from_name(snake_case__ )
__lowerCAmelCase = image_processor_auto_map is not None
__lowerCAmelCase = image_processor_class is not None or type(snake_case__ ) in IMAGE_PROCESSOR_MAPPING
__lowerCAmelCase = resolve_trust_remote_code(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if has_remote_code and trust_remote_code:
__lowerCAmelCase = get_class_from_dynamic_module(
snake_case__ , snake_case__ , **snake_case__ )
__lowerCAmelCase = kwargs.pop("code_revision" , snake_case__ )
if os.path.isdir(snake_case__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(snake_case__ , **snake_case__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(snake_case__ , **snake_case__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(snake_case__ ) in IMAGE_PROCESSOR_MAPPING:
__lowerCAmelCase = IMAGE_PROCESSOR_MAPPING[type(snake_case__ )]
return image_processor_class.from_dict(snake_case__ , **snake_case__ )
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
    def register(config_class, image_processor_class):
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(snake_case__ , snake_case__ )
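

# Typical usage (standard transformers API; the checkpoint id is
# illustrative):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=image, return_tensors="pt")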
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
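

if __name__ == "__main__":
    # Usage sketch (added for illustration; the output path and feature spec
    # are arbitrary choices, not part of the original module):
    features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})

    @get_duration
    def write_dummy(path):
        generate_example_dataset(path, features, num_examples=50)

    print(write_dummy("/tmp/dummy.arrow"), "seconds")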
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
def __truediv__( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : int ) -> List[str]:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
A = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other ,_SCREAMING_SNAKE_CASE )
raise ValueError
def __floordiv__( self : Any ,_SCREAMING_SNAKE_CASE : Any ) -> Dict:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
A = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other ,_SCREAMING_SNAKE_CASE )
raise ValueError
def __pow__( self : int ,_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
'''simple docstring'''
if n < 0 or isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
A = self
for _ in range(n - 1 ):
x *= self
return x
def snake_case ( UpperCAmelCase : Optional[int], UpperCAmelCase : List[Any], UpperCAmelCase : int ):
if not callable(UpperCAmelCase ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(UpperCAmelCase, (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(UpperCAmelCase, UpperCAmelCase ):
raise ValueError('differentiate() requires an int as input for order' )
A = Dual(UpperCAmelCase, 1 )
A = func(UpperCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def snake_case ( UpperCAmelCase : int ):
return y**2 * y**4
print(differentiate(f, 9, 2))
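# Worked check for differentiate() above: with f(y) = y**2 * y**4 = y**6 the
# second derivative is 30 * y**4, so differentiate(f, 9, 2) prints
# 30 * 9**4 = 196830.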
| 110
| 1
|
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
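# Typical invocations once installed as the `transformers-cli` entry point
# (subcommand names follow the registrations above):
#
#   transformers-cli env
#   transformers-cli download bert-base-uncased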
| 44
|
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    """A self-balancing binary search tree (AVL tree)."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
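# Balance check (a sketch; `t` comes from the demo above, right after the
# inserts): a minimal AVL tree of height 5 needs 12 nodes, so with 10 nodes
# the AVL property bounds the height at 4.
#
#   assert t.get_height() <= 4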
| 266
| 0
|
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 711
|
"""
Project Euler Problem 46 (Goldbach's other conjecture):
find the smallest odd composite that cannot be written as the sum of a
prime and twice a square.
"""

from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composite numbers that violate the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
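# Worked check: the smallest counterexample to Goldbach's other conjecture is
# 5777, so solution() should print 5777.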
| 667
| 0
|
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
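# Hedged usage sketch for the composite config above:
#
#   text_cfg = Pix2StructTextConfig()
#   vision_cfg = Pix2StructVisionConfig()
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.to_dict()["model_type"] == "pix2struct"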
| 175
|
import argparse

import requests
import torch
from PIL import Image

from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor


def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv matrix into separate query / key / value tensors
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
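# Typical invocation (script name and output path are illustrative):
#
#   python path/to/this_script.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base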
| 175
| 1
|
"""Convert ViT checkpoints trained with the DINO method."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )

    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 107
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 680
| 0
|
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->bool:
"""simple docstring"""
__lowercase : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowercase : set[int] = set()
return any(
node not in visited and depth_first_search(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
for node in graph )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->bool:
"""simple docstring"""
visited.add(_lowerCamelCase )
rec_stk.add(_lowerCamelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(_lowerCamelCase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
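# Examples for check_cycle() above:
#   check_cycle({0: [1], 1: [2], 2: [0]})  ->  True   (0 -> 1 -> 2 -> 0 is a cycle)
#   check_cycle({0: [1], 1: [2], 2: []})   ->  False  (no back edge)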
| 704
|
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->int:
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(_lowerCamelCase, _lowerCamelCase ):
raise TypeError("Input value must be a 'int' type" )
return bin(_lowerCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
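# Worked example: bin(25) == "0b11001", which contains three 1 bits, so
# binary_count_setbits(25) returns 3.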
| 281
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 6
|
"""Convert SwiftFormer checkpoints from the original implementation."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 229
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch with values in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
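# Hedged usage sketch for numpy_to_pil() above:
#
#   import numpy as np
#   batch = np.random.rand(2, 64, 64, 3)   # two HxWxC images in [0, 1]
#   pils = numpy_to_pil(batch)
#   assert len(pils) == 2 and pils[0].size == (64, 64)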
| 462
|
import math


def check_partition_perfect(positive_integer: int) -> bool:
    # A partition k counts as "perfect" when this exponent is an integer.
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
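# Worked example for the integrality test above: k = 3 gives the candidate
# (3**2 - 1) / 4 = 2; then log2(sqrt(4 * 2 + 1) / 2 + 1 / 2) = log2(2) = 1,
# an integer, so that partition counts as perfect.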
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : str = (DEISMultistepScheduler,)
_lowerCAmelCase : Tuple = (("""num_inference_steps""", 2_5),)
def _snake_case ( self : Union[str, Any] , **lowercase_ : int ):
snake_case_ : Any = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**lowercase_ )
return config
def _snake_case ( self : Optional[Any] , lowercase_ : Any=0 , **lowercase_ : Dict ):
snake_case_ : Union[str, Any] = dict(self.forward_default_kwargs )
snake_case_ : Union[str, Any] = kwargs.pop('''num_inference_steps''' , lowercase_ )
snake_case_ : int = self.dummy_sample
snake_case_ : Optional[Any] = 0.1 * sample
snake_case_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case_ : List[str] = self.get_scheduler_config(**lowercase_ )
snake_case_ : Union[str, Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
snake_case_ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
snake_case_ : Dict = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
snake_case_ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_, snake_case_ : str = sample, sample
for t in range(lowercase_ , time_step + scheduler.config.solver_order + 1 ):
snake_case_ : Dict = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ : Dict = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self : Dict ):
pass
def _snake_case ( self : Optional[int] , lowercase_ : str=0 , **lowercase_ : Optional[int] ):
snake_case_ : Tuple = dict(self.forward_default_kwargs )
snake_case_ : Any = kwargs.pop('''num_inference_steps''' , lowercase_ )
snake_case_ : int = self.dummy_sample
snake_case_ : Optional[int] = 0.1 * sample
snake_case_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case_ : List[str] = self.get_scheduler_config()
snake_case_ : Tuple = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
snake_case_ : List[str] = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ : int = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self : str , lowercase_ : Any=None , **lowercase_ : Dict ):
if scheduler is None:
snake_case_ : int = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
snake_case_ : List[str] = scheduler_class(**lowercase_ )
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Optional[int] = self.get_scheduler_config(**lowercase_ )
snake_case_ : str = scheduler_class(**lowercase_ )
snake_case_ : List[Any] = 10
snake_case_ : Optional[Any] = self.dummy_model()
snake_case_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : List[str] = model(lowercase_ , lowercase_ )
snake_case_ : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def _snake_case ( self : int ):
snake_case_ : int = dict(self.forward_default_kwargs )
snake_case_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_ )
for scheduler_class in self.scheduler_classes:
snake_case_ : Any = self.get_scheduler_config()
snake_case_ : int = scheduler_class(**lowercase_ )
snake_case_ : Optional[Any] = self.dummy_sample
snake_case_ : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps''' ):
snake_case_ : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
snake_case_ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
snake_case_ : Dict = scheduler.timesteps[5]
snake_case_ : Dict = scheduler.timesteps[6]
snake_case_ : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ : Optional[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self : Any ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case_ : List[Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
snake_case_ : str = self.full_loop(scheduler=lowercase_ )
snake_case_ : List[Any] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
snake_case_ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case_ : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case_ : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
snake_case_ : Any = self.full_loop(scheduler=lowercase_ )
snake_case_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
def _snake_case ( self : Tuple ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def _snake_case ( self : Optional[int] ):
self.check_over_configs(thresholding=lowercase_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , algorithm_type='''deis''' , solver_order=lowercase_ , solver_type=lowercase_ , )
def _snake_case ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def _snake_case ( self : Optional[int] ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , algorithm_type=lowercase_ , )
snake_case_ : int = self.full_loop(
solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , algorithm_type=lowercase_ , )
assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers"
def _snake_case ( self : Optional[int] ):
self.check_over_configs(lower_order_final=lowercase_ )
self.check_over_configs(lower_order_final=lowercase_ )
def _snake_case ( self : Union[str, Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowercase_ , time_step=0 )
def _snake_case ( self : Tuple ):
snake_case_ : str = self.full_loop()
snake_case_ : Union[str, Any] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
def _snake_case ( self : Any ):
snake_case_ : Tuple = self.full_loop(prediction_type='''v_prediction''' )
snake_case_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.0_91 ) < 1E-3
def _snake_case ( self : Optional[int] ):
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : List[Any] = self.get_scheduler_config(thresholding=lowercase_ , dynamic_thresholding_ratio=0 )
snake_case_ : List[str] = scheduler_class(**lowercase_ )
snake_case_ : str = 10
snake_case_ : Optional[int] = self.dummy_model()
snake_case_ : Optional[int] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Optional[Any] = model(lowercase_ , lowercase_ )
snake_case_ : Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
assert sample.dtype == torch.float16
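# To exercise just this scheduler's tests locally (a sketch; the test file
# path is an assumption about the repository layout):
#
#   python -m pytest tests/schedulers/test_scheduler_deis.py -q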
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(_a ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def __lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def __lowercase ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(_a ):
http_head('''https://huggingface.co''' )
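# Shape of the helper under test (a sketch): inside the context manager,
# outbound HTTP is simulated to fail, so callers can assert on the failure
# mode without real network access, e.g.:
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       requests.request("GET", "https://huggingface.co")  # raises ConnectionError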
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
snake_case_ : List[str] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
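# Usage note (a sketch): with the _LazyModule pattern above, heavy submodules
# are imported only on first attribute access, so e.g.
#
#   from transformers import TrOCRProcessor
#
# does not pull in torch-dependent modeling code until TrOCRForCausalLM (or
# another torch-backed symbol) is actually touched.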
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = LxmertTokenizer
UpperCAmelCase = LxmertTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCamelCase ( self : List[str] , _a : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE ='''unwanted, running'''
return input_text, output_text
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE ='''I was born in 92000, and this is falsé.'''
_SCREAMING_SNAKE_CASE =tokenizer.tokenize(_a )
_SCREAMING_SNAKE_CASE =rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_SCREAMING_SNAKE_CASE =tokenizer.encode(_a , add_special_tokens=_a )
_SCREAMING_SNAKE_CASE =rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =tokenizer.encode(_a )
_SCREAMING_SNAKE_CASE =rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
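# WordPiece sketch against the toy vocab defined in setUp(): "unwanted"
# tokenizes to ["un", "##want", "##ed"] because "un" matches greedily and the
# remainder is covered by "##"-prefixed continuation pieces, which is exactly
# what the full-tokenizer test above asserts.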
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = CanineTokenizer
_lowercase = False
def _UpperCamelCase( self : Dict ):
super().setUp()
a__ : str = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase( self : Union[str, Any] ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def _UpperCamelCase( self : Optional[Any] , **lowerCamelCase__ : Tuple ):
a__ : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
a__ : Optional[int] = 1_024
return tokenizer
@require_torch
def _UpperCamelCase( self : Tuple ):
a__ : Dict = self.canine_tokenizer
a__ : Union[str, Any] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
a__ : Dict = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
# fmt: on
a__ : str = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
a__ : str = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _UpperCamelCase( self : Optional[int] ):
a__ : Optional[Any] = self.canine_tokenizer
a__ : List[Any] = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
a__ : Any = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , lowerCamelCase__ )
self.assertIn("attention_mask" , lowerCamelCase__ )
self.assertIn("token_type_ids" , lowerCamelCase__ )
@require_torch
def _UpperCamelCase( self : List[str] ):
a__ : int = self.canine_tokenizer
a__ : List[Any] = [
"What's the weater?",
"It's about 25 degrees.",
]
a__ : Dict = tokenizer(
text_target=lowerCamelCase__ , max_length=32 , padding="max_length" , truncation=lowerCamelCase__ , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _UpperCamelCase( self : int ):
# safety check on max_len default value so we are sure the test works
a__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a__ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ : Optional[Any] = tempfile.mkdtemp()
a__ : int = " He is very happy, UNwant\u00E9d,running"
a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
a__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
a__ : Optional[int] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
shutil.rmtree(lowerCamelCase__ )
a__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ : Tuple = tempfile.mkdtemp()
a__ : int = " He is very happy, UNwant\u00E9d,running"
a__ : Any = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a__ : List[Any] = chr(0XE007 )
additional_special_tokens.append(lowerCamelCase__ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
a__ : str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
a__ : Tuple = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
a__ : List[Any] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertIn(lowerCamelCase__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a__ : List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : Optional[int] = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a__, a__ : Tuple = self.get_clean_sequence(lowerCamelCase__ )
# a special token for Canine can be defined as follows:
a__ : List[Any] = 0XE005
a__ : Optional[Any] = chr(lowerCamelCase__ )
tokenizer.add_special_tokens({"cls_token": special_token} )
a__ : str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
a__ : Optional[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase__ )
a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , input_encoded + special_token_id )
a__ : List[Any] = tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
self.assertTrue(special_token not in decoded )
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a__ : List[Any] = chr(0XE005 )
a__ : str = chr(0XE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
a__ : Dict = tokenizer.tokenize(lowerCamelCase__ )
a__ : int = tokenizer.tokenize(lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
self.assertEqual(token_a[0] , lowerCamelCase__ )
self.assertEqual(token_a[0] , lowerCamelCase__ )
@require_tokenizers
def _UpperCamelCase( self : Tuple ):
a__ : Any = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
a__ : List[Any] = 0XE006
a__ : Union[str, Any] = chr(lowerCamelCase__ )
a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCamelCase__ )
tokenizer.from_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
a__ : Dict = json.load(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
a__ : Tuple = json.load(lowerCamelCase__ )
# a special token for Canine can be defined as follows:
a__ : Optional[int] = 0XE006
a__ : Tuple = chr(lowerCamelCase__ )
a__ : List[Any] = [new_token_a]
a__ : Optional[Any] = [new_token_a]
with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a__ : Any = tokenizer_class.from_pretrained(lowerCamelCase__ , extra_ids=0 )
self.assertIn(lowerCamelCase__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a__ : Optional[Any] = 0XE007
a__ : str = chr(lowerCamelCase__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a__ : List[str] = [AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ )]
a__ : List[str] = tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , extra_ids=0 )
self.assertIn(lowerCamelCase__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a__ : Optional[Any] = "hello world"
if self.space_between_special_tokens:
a__ : Any = "[CLS] hello world [SEP]"
else:
a__ : str = input
a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
a__ : Dict = tokenizer.decode(lowerCamelCase__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCamelCase__ , [output, output.lower()] )
def _UpperCamelCase( self : Dict ):
a__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a__ : int = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
a__ : Optional[Any] = "a"
a__ : Optional[Any] = ord(lowerCamelCase__ )
for attr in attributes_list:
setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [] )
a__ : List[Any] = 0XE006
a__ : Dict = chr(lowerCamelCase__ )
setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _UpperCamelCase( self : str ):
pass
def _UpperCamelCase( self : int ):
pass
def _UpperCamelCase( self : Any ):
pass
def _UpperCamelCase( self : Any ):
pass
def _UpperCamelCase( self : List[str] ):
pass
def _UpperCamelCase( self : List[str] ):
pass
def _UpperCamelCase( self : List[Any] ):
pass
def _UpperCamelCase( self : Optional[int] ):
pass
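# Encoding sketch grounded in the batch test above: CANINE tokenizes raw
# Unicode code points, so input ids are ord() values framed by the reserved
# private-use codepoints 0xE000 (57344, CLS) and 0xE001 (57345, SEP), e.g.
# "Li" -> [57344, 76, 105, 57345].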
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ : Union[str, Any] = 1_6
UpperCAmelCase__ : str = 3_2
def lowercase_ ( _snake_case ,_snake_case = 16 ):
SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(_snake_case ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : int = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_snake_case ,max_length=_snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = datasets.map(
_snake_case ,batched=_snake_case ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : Tuple = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Dict = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Tuple = 8
else:
SCREAMING_SNAKE_CASE__ : Any = None
return tokenizer.pad(
_snake_case ,padding="""longest""" ,max_length=_snake_case ,pad_to_multiple_of=_snake_case ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : List[Any] = DataLoader(
tokenized_datasets["""train"""] ,shuffle=_snake_case ,collate_fn=_snake_case ,batch_size=_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=_snake_case ,collate_fn=_snake_case ,batch_size=_snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ : Tuple = mocked_dataloaders # noqa: F811
def lowercase_ ( _snake_case ,_snake_case ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,_snake_case ) == "1":
SCREAMING_SNAKE_CASE__ : int = 2
# New Code #
SCREAMING_SNAKE_CASE__ : Tuple = int(args.gradient_accumulation_steps )
SCREAMING_SNAKE_CASE__ : List[str] = int(args.local_sgd_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : Optional[Any] = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=_snake_case )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : List[Any] = config["""lr"""]
SCREAMING_SNAKE_CASE__ : List[Any] = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ : Any = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ : Tuple = evaluate.load("""glue""" ,"""mrpc""" )
set_seed(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_dataloaders(_snake_case ,_snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=_snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AdamW(params=model.parameters() ,lr=_snake_case )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=_snake_case ,num_warmup_steps=100 ,num_training_steps=(len(_snake_case ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.prepare(
_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
# Now we train the model
for epoch in range(_snake_case ):
model.train()
with LocalSGD(
accelerator=_snake_case ,model=_snake_case ,local_sgd_steps=_snake_case ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs, and advise against using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = model(**_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.loss
accelerator.backward(_snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**_snake_case )
SCREAMING_SNAKE_CASE__ : Any = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_snake_case ,references=_snake_case ,)
SCREAMING_SNAKE_CASE__ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=_snake_case ,default=_snake_case ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" ,type=_snake_case ,default=1 ,help="""The number of minibatches to be ran before gradients are accumulated.""" ,)
parser.add_argument(
"""--local_sgd_steps""" ,type=_snake_case ,default=8 ,help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_snake_case ,_snake_case )
if __name__ == "__main__":
main()
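# Launch sketch (the flag names mirror the argparse setup above; the script
# file name is an assumption):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2 --mixed_precision fp16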
"""simple docstring"""
# flake8: noqa
# Lint as: python3
lowerCAmelCase__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
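# Both predicates above agree, e.g. (a sketch):
#   perfect_square(16) -> True,  perfect_square(12) -> False
#   perfect_square_binary_search(16) -> True, in O(log n) integer steps,
# and the binary-search variant sidesteps float rounding for very large n.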
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''umt5'''
lowerCamelCase_ = ['''past_key_values''']
def __init__( self , lowercase=2_5_0_1_1_2 , lowercase=5_1_2 , lowercase=6_4 , lowercase=1_0_2_4 , lowercase=8 , lowercase=None , lowercase=6 , lowercase=3_2 , lowercase=1_2_8 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gated-gelu" , lowercase=True , lowercase=True , lowercase="T5Tokenizer" , lowercase=True , lowercase=0 , lowercase=1 , lowercase=0 , **lowercase , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowercase , tokenizer_class=lowercase , tie_word_embeddings=lowercase , pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , **lowercase , )
A_ : Optional[Any] = vocab_size
A_ : List[Any] = d_model
A_ : Any = d_kv
A_ : Union[str, Any] = d_ff
A_ : List[Any] = num_layers
A_ : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A_ : Dict = num_heads
A_ : Tuple = relative_attention_num_buckets
A_ : List[Any] = relative_attention_max_distance
A_ : Optional[int] = dropout_rate
A_ : Tuple = layer_norm_epsilon
A_ : List[str] = initializer_factor
A_ : Dict = feed_forward_proj
A_ : Union[str, Any] = use_cache
A_ : Union[str, Any] = self.feed_forward_proj.split('-' )
A_ : Optional[Any] = act_info[-1]
A_ : List[str] = act_info[0] == 'gated'
if len(lowercase ) > 1 and act_info[0] != "gated" or len(lowercase ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
A_ : Tuple = 'gelu_new'
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.d_model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.num_heads
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.num_layers
class UpperCAmelCase ( __A ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
A_ : Tuple = 'past_encoder_sequence + sequence'
A_ : List[str] = {0: 'batch'}
A_ : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A_ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
A_ : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_3
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 5E-4
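# Construction sketch (all values shown are the defaults from the signature
# above, so this is equivalent to the no-argument constructor):
#
#   config = UMT5Config(d_model=512, num_heads=8, feed_forward_proj="gated-gelu")
#
# "gated-gelu" is split on "-" into the gating flag and the activation name,
# and is normalized to the "gelu_new" activation as handled above.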
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase = pytest.mark.integration
@pytest.mark.parametrize('path' ,['paws', 'csv'] )
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Tuple ):
'''simple docstring'''
inspect_dataset(__lowercase ,__lowercase )
A_ : Optional[Any] = path + '.py'
assert script_name in os.listdir(__lowercase )
assert "__pycache__" not in os.listdir(__lowercase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' ,['accuracy'] )
def UpperCamelCase ( __lowercase : Any ,__lowercase : Union[str, Any] ):
'''simple docstring'''
inspect_metric(__lowercase ,__lowercase )
A_ : Optional[Any] = path + '.py'
assert script_name in os.listdir(__lowercase )
assert "__pycache__" not in os.listdir(__lowercase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Dict ):
'''simple docstring'''
A_ : List[Any] = get_dataset_config_info(__lowercase ,config_name=__lowercase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def UpperCamelCase ( __lowercase : Dict ,__lowercase : List[Any] ,__lowercase : int ):
'''simple docstring'''
with pytest.raises(__lowercase ):
get_dataset_config_info(__lowercase ,config_name=__lowercase )
@pytest.mark.parametrize(
'path, expected' ,[
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] ,)
def UpperCamelCase ( __lowercase : str ,__lowercase : str ):
'''simple docstring'''
A_ : Any = get_dataset_config_names(__lowercase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' ,[
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] ,)
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : str ,__lowercase : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = get_dataset_infos(__lowercase )
assert list(infos.keys() ) == expected_configs
A_ : Any = expected_configs[0]
assert expected_config in infos
A_ : Tuple = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def UpperCamelCase ( __lowercase : Any ,__lowercase : Optional[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = get_dataset_infos(__lowercase )
assert expected_config in infos
A_ : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Tuple ,__lowercase : str ):
'''simple docstring'''
with pytest.raises(__lowercase ):
get_dataset_split_names(__lowercase ,config_name=__lowercase )
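# Illustration of the inspected metadata these tests rely on (a sketch):
#
#   get_dataset_split_names("squad", config_name="plain_text")
#   # -> ["train", "validation"], matching the parametrized expectations above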
"""simple docstring"""
from __future__ import annotations
def __lowercase ( lowerCamelCase_ : int = 4 ):
SCREAMING_SNAKE_CASE__ = abs(lowerCamelCase_ ) or 4
return [[1 + x + y * row_size for x in range(lowerCamelCase_ )] for y in range(lowerCamelCase_ )]
def __lowercase ( lowerCamelCase_ : list[list[int]] ):
return reverse_row(transpose(lowerCamelCase_ ) )
# OR.. transpose(reverse_column(matrix))
def __lowercase ( lowerCamelCase_ : list[list[int]] ):
return reverse_row(reverse_column(lowerCamelCase_ ) )
# OR.. reverse_column(reverse_row(matrix))
def __lowercase ( lowerCamelCase_ : list[list[int]] ):
return reverse_column(transpose(lowerCamelCase_ ) )
# OR.. transpose(reverse_row(matrix))
def __lowercase ( lowerCamelCase_ : list[list[int]] ):
SCREAMING_SNAKE_CASE__ = [list(lowerCamelCase_ ) for x in zip(*lowerCamelCase_ )]
return matrix
def __lowercase ( lowerCamelCase_ : list[list[int]] ):
SCREAMING_SNAKE_CASE__ = matrix[::-1]
return matrix
def __lowercase ( lowerCamelCase_ : list[list[int]] ):
SCREAMING_SNAKE_CASE__ = [x[::-1] for x in matrix]
return matrix
def __lowercase ( lowerCamelCase_ : list[list[int]] ):
for i in matrix:
print(*lowerCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
_lowerCamelCase = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
_lowerCamelCase = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
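# Worked example: make_matrix(2) == [[1, 2], [3, 4]], so
#   transpose  -> [[1, 3], [2, 4]]
#   rotate_90  -> reverse_row(transpose(m)) == [[2, 4], [1, 3]]
# i.e. the first column of the original becomes the bottom row, as expected
# for a 90-degree counterclockwise rotation.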
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowerCamelCase = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.join("examples" , "by_feature" ) )
SCREAMING_SNAKE_CASE__ = os.path.abspath("examples" )
for item in os.listdir(UpperCAmelCase__ ):
if item not in EXCLUDE_EXAMPLES:
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCAmelCase__ , feature_script=UpperCAmelCase__ , tested_section="main()" if parser_only else "training_function()" , ):
SCREAMING_SNAKE_CASE__ = compare_against_test(
os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = "\n".join(UpperCAmelCase__ )
if special_strings is not None:
for string in special_strings:
SCREAMING_SNAKE_CASE__ = diff.replace(UpperCAmelCase__ , "" )
self.assertEqual(UpperCAmelCase__ , "" )
def lowerCAmelCase__ ( self ):
self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase__ )
self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase__ )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
SCREAMING_SNAKE_CASE__ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.one_complete_example("complete_cv_example.py" , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Dict = False
@classmethod
def lowerCAmelCase__ ( cls ):
super().setUpClass()
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE__ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def lowerCAmelCase__ ( cls ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
SCREAMING_SNAKE_CASE__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
SCREAMING_SNAKE_CASE__ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
self.assertNotIn("epoch 0:" , UpperCAmelCase__ )
self.assertIn("epoch 1:" , UpperCAmelCase__ )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
SCREAMING_SNAKE_CASE__ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE__ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , UpperCAmelCase__ )
self.assertIn("epoch 1:" , UpperCAmelCase__ )
else:
self.assertIn("epoch 0:" , UpperCAmelCase__ )
self.assertIn("epoch 1:" , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
SCREAMING_SNAKE_CASE__ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = re.findall("({.+})" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = [r for r in results if "accuracy" in r][-1]
SCREAMING_SNAKE_CASE__ = ast.literal_eval(UpperCAmelCase__ )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
SCREAMING_SNAKE_CASE__ = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , "tracking" ) ) )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
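# What run_command actually executes in these tests (a sketch of the expanded
# command line, not an exact transcript):
#
#   accelerate launch --config_file <tmpdir>/default_config.yml <example.py> <flags...>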
"""simple docstring"""
import pprint
import requests
lowerCamelCase_ = '''https://zenquotes.io/api'''
def snake_case ( ):
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def snake_case ( ):
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
lowerCamelCase_ = random_quotes()
pprint.pprint(response)
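# Response shape sketch (the field names are an assumption about the
# zenquotes.io payload, commonly quote text, author, and pre-rendered HTML):
#
#   [{"q": "<quote>", "a": "<author>", "h": "<html>"}]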
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
snake_case__ = """glpn"""
def __init__(self: Tuple , __UpperCAmelCase: Tuple=3 , __UpperCAmelCase: Dict=4 , __UpperCAmelCase: Any=[2, 2, 2, 2] , __UpperCAmelCase: Optional[int]=[8, 4, 2, 1] , __UpperCAmelCase: Dict=[32, 64, 160, 256] , __UpperCAmelCase: List[str]=[7, 3, 3, 3] , __UpperCAmelCase: Dict=[4, 2, 2, 2] , __UpperCAmelCase: Optional[int]=[1, 2, 5, 8] , __UpperCAmelCase: Dict=[4, 4, 4, 4] , __UpperCAmelCase: List[str]="gelu" , __UpperCAmelCase: str=0.0 , __UpperCAmelCase: List[Any]=0.0 , __UpperCAmelCase: Dict=0.02 , __UpperCAmelCase: List[str]=0.1 , __UpperCAmelCase: Union[str, Any]=1E-6 , __UpperCAmelCase: Dict=64 , __UpperCAmelCase: Dict=10 , __UpperCAmelCase: Union[str, Any]=-1 , **__UpperCAmelCase: Dict , ) -> str:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__a : Optional[Any] = num_channels
__a : Tuple = num_encoder_blocks
__a : int = depths
__a : Union[str, Any] = sr_ratios
__a : str = hidden_sizes
__a : List[Any] = patch_sizes
__a : Optional[Any] = strides
__a : Any = mlp_ratios
__a : Any = num_attention_heads
__a : Any = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : int = attention_probs_dropout_prob
__a : List[str] = initializer_range
__a : Optional[int] = drop_path_rate
__a : Any = layer_norm_eps
__a : List[Any] = decoder_hidden_size
__a : Any = max_depth
__a : int = head_in_index
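# Construction sketch (all values shown are the defaults from the signature
# above, so this is equivalent to constructing the config with no arguments):
#
#   config = GLPNConfig(num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2])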
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowercase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""DPTFeatureExtractor"""]
__lowercase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowercase = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
__lowercase = {
"""yjernite/retribert-base-uncased""": 512,
}
__lowercase = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
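A short usage sketch of the class above (assumes `transformers` is installed and exposes this deprecated tokenizer at the top level, and that the checkpoint from the maps is reachable); it shows how the two special-token helpers compose a sentence pair:

from transformers import RetriBertTokenizerFast  # import path assumed for illustration

tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
enc = tok("what is retrieval?", "finding passages by embedding similarity")
# 0s cover "[CLS] question [SEP]", 1s cover "answer [SEP]"
print(enc["token_type_ids"])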
| 563
| 0
|
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
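A small worked example of the functions above (expected strings computed by hand, so treat them as a sanity check rather than a specification):

cipher_map = create_cipher_map("MANGO")
assert encipher("HELLO", cipher_map) == "DOIIL"
assert decipher("DOIIL", cipher_map) == "HELLO"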
| 602
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
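A minimal sketch of how the tester above is driven outside the full mixin machinery (assumes torch and the Nezha classes are importable; the TestCase itself serves as the tester's `parent`):

import unittest

class NezhaSmokeTest(unittest.TestCase):
    def test_model_shapes(self):
        tester = NezhaModelTester(self)  # the TestCase acts as `parent`
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_model(*config_and_inputs)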
| 602
| 1
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
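A hypothetical invocation of the script above (the run id and token are placeholders):

# python extract_warnings.py --workflow_run_id 123456789 \
#     --output_dir ./warnings_out --token $GITHUB_TOKEN \
#     --targets DeprecationWarning,UserWarning,FutureWarning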
| 675
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
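Usage sketch mirroring the tests above (the checkpoint is tiny, so it runs quickly; requires torch and network access):

import tempfile
from make_student import create_student_by_copying_alternating_layers

student, *_ = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
)
print(student.config.encoder_layers, student.config.decoder_layers)  # expects: 1 1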
| 675
| 1
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
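A hypothetical command line for the converter above (the script name and all paths are placeholders):

# python convert_clap_original_pytorch_to_hf.py \
#     --checkpoint_path ./clap_htsat_tiny.pt \
#     --pytorch_dump_folder_path ./clap_hf \
#     --enable_fusion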
| 98
|
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
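A quick check of the base-26 arithmetic above (computed by hand): for "AB", B contributes 2 * 26**0 = 2 and A contributes 1 * 26**1 = 26, giving 28; for "ZZ", 26 + 26 * 26 = 702.

assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZZ") == 702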
| 98
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
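A minimal usage sketch of the config above (values chosen arbitrarily for illustration):

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
print(config.hidden_size)  # attribute_map aliases hidden_size -> n_embd, so this prints 128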
| 707
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
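The point of `TFBertTokenizer` exercised above is that tokenization happens inside the TF graph, so a model plus its tokenizer can ship as one SavedModel. A minimal sketch (network access is needed for the checkpoint):

import tensorflow as tf
from transformers import TFBertTokenizer

tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
print(tf_tokenizer(tf.constant(["hello world"]))["input_ids"])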
| 162
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
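A hypothetical invocation of the extraction script above (script name and output path are placeholders):

# python extract.py --model_type roberta --model_name roberta-large \
#     --dump_checkpoint serialization_dir/roberta_6layers.pth --vocab_transform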
| 460
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 460
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 705
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
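The device-dependent seeding in `get_dummy_inputs` above is worth isolating; a small helper capturing the same logic (a sketch, not a diffusers API):

import torch

def make_generator(device, seed: int = 0):
    # MPS does not support torch.Generator(device=...), so fall back to the global seed
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)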
| 52
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"],
        units=predefined_args["units"], hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"], dropout=predefined_args["dropout"],
        output_attention=False, output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab),
        units=predefined_args["units"], embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"],
        use_pooler=False, use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(lowerCamelCase_ )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(lowerCamelCase_ )
    hf_bort_model = BertModel.from_pretrained(lowerCamelCase_ )
    hf_bort_model.eval()
    hf_input_ids = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' )
    output_hf = hf_bort_model(**hf_input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 105
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__ (SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 1_0
    def get_scheduler_config ( self , **__a ):
        config = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**__a )
return config
def lowercase ( self : List[str] ):
        num_inference_steps = 1_0
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
def lowercase ( self : Optional[int] ):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def lowercase ( self : Optional[int] ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
def lowercase ( self : Union[str, Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3
def lowercase ( self : List[str] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_6, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3
def lowercase ( self : Any ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [3_9, 3_0, 1_2, 1_5, 0]
        with self.assertRaises(ValueError , msg="""`timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=timesteps )
def lowercase ( self : Any ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [3_9, 3_0, 1_2, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def lowercase ( self : str ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 648
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config ( self , **_snake_case ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_snake_case )
return config
def snake_case_ ( self : Union[str, Any] ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def snake_case_ ( self : Dict ):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def snake_case_ ( self : Optional[Any] ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def snake_case_ ( self : Optional[int] ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def snake_case_ ( self : Tuple ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def snake_case_ ( self : Any ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def snake_case_ ( self : List[str] ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def snake_case_ ( self : Dict ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
def snake_case_ ( self : Optional[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def snake_case_ ( self : Tuple ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
assert abs(result_mean.item() - 0.33_72 ) < 1E-3
def snake_case_ ( self : Tuple ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
assert abs(result_mean.item() - 0.26_31 ) < 1E-3
def snake_case_ ( self : str ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
def snake_case_ ( self : List[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
def snake_case_ ( self : List[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def snake_case_ ( self : str ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 284
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Optional[Any] ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def snake_case_ ( self : Dict ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        gelu10 = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
def snake_case_ ( self : Any ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError ):
            get_activation('''bogus''' )
        with self.assertRaises(KeyError ):
            get_activation(None )
def snake_case_ ( self : Dict ):
        acta = get_activation('''gelu''' )
        acta.a = 1
        actb = get_activation('''gelu''' )
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = actb.a
| 284
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['keras_nlp']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['keras_nlp'] )
| 425
|
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 425
| 1
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class snake_case__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
    '''simple docstring'''
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1E-2
@property
    def dummy_input ( self ) -> Optional[Any]:
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
def __lowercase ( self ) -> Dict:
'''simple docstring'''
return (3, 32, 32)
@property
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common ( self ) -> Optional[Any]:
'''simple docstring'''
        init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
pass
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
        model , loading_info = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
        model = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case :Optional[Any] = torch.tensor(
[
-4.00_78e-01,
-3.83_23e-04,
-1.26_81e-01,
-1.14_62e-01,
2.00_95e-01,
1.08_93e-01,
-8.82_47e-02,
-3.03_61e-01,
-9.86_44e-03,
] )
elif torch_device == "cpu":
__snake_case :str = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
__snake_case :Optional[int] = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-2 ) )
@slow
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
    def get_file_format ( self , seed , shape ) -> Any:
        '''simple docstring'''
        return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def __lowercase ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image ( self , seed=0 , shape=(4, 3, 5_12, 5_12) , fpaa=False ) -> Optional[int]:
        '''simple docstring'''
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
def __lowercase ( self , a__="CompVis/stable-diffusion-v1-4" , a__=False ) -> int:
'''simple docstring'''
__snake_case :str = """fp16""" if fpaa else None
__snake_case :Optional[int] = torch.floataa if fpaa else torch.floataa
__snake_case :Optional[Any] = AutoencoderKL.from_pretrained(
a__ , subfolder="""vae""" , torch_dtype=a__ , revision=a__ , )
model.to(a__ ).eval()
return model
    def get_generator ( self , seed=0 ) -> List[str]:
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def __lowercase ( self , a__ , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Any = self.get_sd_vae_model()
__snake_case :Union[str, Any] = self.get_sd_image(a__ )
__snake_case :List[str] = self.get_generator(a__ )
with torch.no_grad():
__snake_case :str = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
__snake_case :List[str] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case :Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(a__ , a__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def __lowercase ( self , a__ , a__ ) -> List[str]:
'''simple docstring'''
__snake_case :Any = self.get_sd_vae_model(fpaa=a__ )
__snake_case :Optional[int] = self.get_sd_image(a__ , fpaa=a__ )
__snake_case :List[Any] = self.get_generator(a__ )
with torch.no_grad():
__snake_case :List[Any] = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
__snake_case :str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case :Any = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def __lowercase ( self , a__ , a__ , a__ ) -> Tuple:
'''simple docstring'''
__snake_case :Tuple = self.get_sd_vae_model()
__snake_case :Dict = self.get_sd_image(a__ )
with torch.no_grad():
__snake_case :Tuple = model(a__ ).sample
assert sample.shape == image.shape
__snake_case :Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case :Union[str, Any] = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(a__ , a__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def __lowercase ( self , a__ , a__ ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[Any] = self.get_sd_vae_model()
__snake_case :int = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case :str = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
__snake_case :Optional[int] = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case :str = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def __lowercase ( self , a__ , a__ ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[Any] = self.get_sd_vae_model(fpaa=a__ )
__snake_case :Optional[Any] = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
__snake_case :Any = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
__snake_case :Dict = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case :Any = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def __lowercase ( self , a__ ) -> List[str]:
'''simple docstring'''
__snake_case :Dict = self.get_sd_vae_model(fpaa=a__ )
__snake_case :Dict = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
__snake_case :Optional[int] = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case :List[Any] = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(a__ , a__ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def __lowercase ( self , a__ ) -> Optional[int]:
'''simple docstring'''
__snake_case :List[Any] = self.get_sd_vae_model()
__snake_case :List[str] = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case :Any = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case :Any = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(a__ , a__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def __lowercase ( self , a__ , a__ ) -> Tuple:
'''simple docstring'''
__snake_case :str = self.get_sd_vae_model()
__snake_case :Dict = self.get_sd_image(a__ )
__snake_case :Optional[int] = self.get_generator(a__ )
with torch.no_grad():
__snake_case :List[str] = model.encode(a__ ).latent_dist
__snake_case :List[Any] = dist.sample(generator=a__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case :List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case :Optional[int] = torch.tensor(a__ )
__snake_case :Any = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(a__ , a__ , atol=a__ )
| 291
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class snake_case__ ( PipelineTool):
    '''simple docstring'''
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
def __init__( self , *a__ , **a__ ) -> Any:
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*a__ , **a__ )
    def encode ( self , image : "Image" , question : str ) -> int:
        '''simple docstring'''
        return self.pre_processor(image , question , return_tensors="""pt""" )
    def forward ( self , inputs ) -> Union[str, Any]:
        '''simple docstring'''
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode ( self , outputs ) -> Tuple:
        '''simple docstring'''
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 291
| 1
|
'''simple docstring'''
def is_isogram ( string : str ) -> bool:
    if not all(x.isalpha() for x in string ):
        raise ValueError('''String must only contain alphabetic characters.''' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str )
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 92
|
def lowercase ( number: int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
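# Worked example of Kernighan's trick (illustrative, not part of the original module):
# 12 = 0b1100 -> 12 & 11 = 0b1000 -> 8 & 7 = 0b0000, two iterations, so the
# function returns 2; likewise 25 = 0b11001 has three set bits and returns 3.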
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137
| 0
|
'''simple docstring'''
def solution ( limit = 1_000_000 ) -> int:
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
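# Quick sanity check (hedged, computed by hand): for limit = 8 the Euler phi
# values are phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) should return 21.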
if __name__ == "__main__":
print(f'{solution() = }')
| 702
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase_ ( matrix ) -> list[list[float]]:
    '''simple docstring'''
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''' )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
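# Worked 2x2 example (illustrative, not part of the original module): for
# [[2.0, 5.0], [1.0, 3.0]] the determinant is 2*3 - 5*1 = 1, so the function
# returns the adjugate divided by 1, i.e. [[3.0, -5.0], [-1.0, 2.0]].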
| 357
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 40_96,
"allenai/longformer-large-4096": 40_96,
"allenai/longformer-large-4096-finetuned-triviaqa": 40_96,
"allenai/longformer-base-4096-extra.pos.embd.only": 40_96,
"allenai/longformer-large-4096-extra.pos.embd.only": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ) ,ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) ,ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) ,ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs ,cs ) )
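# Illustration (not part of the original module): printable bytes such as b"A"
# (65) map to themselves, while bytes outside the kept ranges are remapped above
# U+0100; the space byte (32) becomes chr(256 + 32) = "Ġ", which is why
# GPT-2/RoBERTa-style BPE tokens show a leading "Ġ" for a preceding space.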
def get_pairs(word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
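# Example: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.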
class A__ ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size ( self ):
        return len(self.encoder )
    def get_vocab ( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize ( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id ( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token ( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string ( self , tokens ):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences ( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization ( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 550
|
class A__ :
    """simple docstring"""
    def __init__( self , n ):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self ):
        return self.size
    def is_empty ( self ):
        return self.size == 0
    def first ( self ):
        return False if self.is_empty() else self.array[self.front]
    def enqueue ( self , data ):
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue ( self ):
        if self.size == 0:
            raise Exception('''UNDERFLOW''' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
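# Minimal usage sketch (hypothetical values):
#   q = A__(2)
#   q.enqueue("a").enqueue("b")  # enqueue returns self, so calls chain
#   q.dequeue()                  # -> "a"; a third enqueue beforehand would raise "QUEUE IS FULL"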
| 550
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 10_24}
class _A ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , """r""" , encoding="""utf-8""" ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size ( self ) -> int:
        return len(self.fairseq_ids_to_tokens )
    def get_vocab ( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize ( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id ( self , token ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token ( self , index ) -> str:
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string ( self , tokens ) -> str:
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"""{str(token )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
| 712
|
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_files(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_files(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_files(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 172
| 0
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb",
    )

    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 33
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 508
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
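# Example: the regex above splits on lowercase-to-uppercase transitions and on
# acronym boundaries, so camel_case_split("TFRobertaModel") returns
# ["TF", "Roberta", "Model"].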
def _center_text(text, width):
    """Center `text` in a cell of size `width`, counting ✅/❌ as width 2."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 721
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq MusicGen state-dict key to its Transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
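# Example: a fairseq key such as "transformer.layers.0.linear1.weight" becomes
# "model.decoder.layers.0.fc1.weight" after the replacements above.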
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames all keys, splits fused qkv projections, and pulls out the enc-dec projection."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 51
| 0
|
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
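# Usage sketch: downstream modules can guard an optional dependency with
#
#   dep_version_check("tokenizers")
#
# which raises if the installed version violates the pin recorded in
# dependency_versions_table.py.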
| 535
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
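# Usage sketch (illustrative): extracting log-mel features from one second of
# 16 kHz audio with the default configuration. The zero waveform is a stand-in.
#
#   import numpy as np
#   feature_extractor = WhisperFeatureExtractor()
#   audio = np.zeros(16_000, dtype=np.float32)
#   features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
#   features["input_features"].shape  # (1, 80, 3000) with the defaults above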
| 222
| 0
|
'''simple docstring'''
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
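# Examples: the check letter is LOOKUP_LETTERS[number % 23]; for the classic test
# DNI, 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so:
#   is_spain_national_id("12345678Z")  -> True
#   is_spain_national_id("12345678A")  -> False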
if __name__ == "__main__":
import doctest
doctest.testmod()
| 418
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 418
| 1
|
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 644
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
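# Usage sketch (illustrative): generating a small throwaway dataset to benchmark
# the writer. The feature spec and path are hypothetical.
#
#   features = datasets.Features(
#       {"text": datasets.Value("string"), "score": datasets.Value("float32")}
#   )
#   ds = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#   assert len(ds) == 100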
| 29
| 0
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the impedance formula Z = sqrt(R**2 + X**2) on any two given electrical
    values, and return the third one in a Python dict.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
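# Examples: with Z = sqrt(R**2 + X**2) and a 3-4-5 triangle,
#   electrical_impedance(3, 4, 0) -> {"impedance": 5.0}
#   electrical_impedance(0, 4, 5) -> {"resistance": 3.0}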
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675
|
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering implemented with the TensorFlow 1.x graph API."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
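# Usage sketch (illustrative): clustering four 2-D points into two clusters.
# Note that tf.Session / tf.placeholder / tf.sub are TF1-era APIs, so this
# requires TensorFlow 1.x (or tf.compat.v1) to run.
#
#   vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 8.5]])
#   centroids, assignments = TFKMeansCluster(vectors, 2)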
| 675
| 1
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 423
|
def simplify(current_set):
    # Divide each row by the magnitude of its first term, creating a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations):
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 423
| 1
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in `model` with the converted T5X params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 718
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )

        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_inference_masked_lm( self ):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )

        print(output[:, :3, :3] )

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase ):
    """simple docstring"""

    tolerance = 1e-4

    def test_basic( self ):
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )

        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]

        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
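# For reference, a minimal NumPy sketch of the sinusoidal table the two tests
# above expect: positions go down the rows, the first half of the channels
# holds the sin terms and the second half the cos terms. This is an
# illustration of the layout implied by the expected values, not the library
# implementation itself.
#
#   import numpy as np
#
#   def sinusoidal_table(n_pos, dim):
#       pos = np.arange(n_pos)[:, None]
#       inv_freq = 1.0 / np.power(10_000.0, 2 * np.arange(dim // 2) / dim)
#       return np.concatenate([np.sin(pos * inv_freq), np.cos(pos * inv_freq)], axis=-1)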
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase ):
    """simple docstring"""

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings( self ):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]

        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query_values = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        expected_key_values = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_values , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_values , atol=self.tolerance )
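# A minimal NumPy sketch of the rotary update the test above exercises,
# assuming the sin|cos half-and-half channel layout sketched earlier; it is an
# illustration, not the library implementation.
#
#   def apply_rotary_np(x, sinusoidal):
#       sin, cos = np.split(sinusoidal, 2, axis=-1)
#       sin_pos = np.repeat(sin, 2, axis=-1)  # (s0, s0, s1, s1, ...)
#       cos_pos = np.repeat(cos, 2, axis=-1)
#       rotated = np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)
#       return x * cos_pos + rotated * sin_pos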
| 571
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
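# Illustrative call (not part of the original helper):
#   floats_list((2, 4)) -> two lists of four random floats drawn from [0.0, scale)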
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=16_000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp( self ):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCAmelCase_ ( self )-> int:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase_ = [np.asarray(_lowercase ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase_ = feature_extractor(_lowercase , padding=_lowercase , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
UpperCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# Test batched
UpperCamelCase_ = feature_extractor(_lowercase , return_tensors="np" ).input_features
UpperCamelCase_ = feature_extractor(_lowercase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase_ = np.asarray(_lowercase )
UpperCamelCase_ = feature_extractor(_lowercase , return_tensors="np" ).input_features
UpperCamelCase_ = feature_extractor(_lowercase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase_ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase_ = [None, 16, None]
for max_length, padding in zip(_lowercase , _lowercase ):
UpperCamelCase_ = feature_extractor(
_lowercase , padding=_lowercase , max_length=_lowercase , return_attention_mask=_lowercase )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = [np.sum(_lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase_ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase_ = [None, 16, None]
for max_length, padding in zip(_lowercase , _lowercase ):
UpperCamelCase_ = feature_extractor(
_lowercase , max_length=_lowercase , padding=_lowercase , return_tensors="np" , return_attention_mask=_lowercase )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = [np.sum(_lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase_ = feature_extractor(
_lowercase , padding="max_length" , max_length=4 , truncation=_lowercase , return_tensors="np" , return_attention_mask=_lowercase , )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase_ = feature_extractor(
_lowercase , padding="longest" , max_length=4 , truncation=_lowercase , return_tensors="np" , return_attention_mask=_lowercase , )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase_ = feature_extractor(
_lowercase , padding="longest" , max_length=16 , truncation=_lowercase , return_tensors="np" , return_attention_mask=_lowercase , )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
    def test_double_precision_pad( self ):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration( self ):
        # fmt: off
        expected_input_features = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        input_features = feature_extractor(input_speech , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, : expected_input_features.shape[0]] , expected_input_features , atol=1e-4 ) )
| 628
|
def lowerCAmelCase( num )-> str:
    """Converts an integer to its binary representation as a string, e.g. 26 -> "0b11010"."""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
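# Example behaviour of the converter above (illustrative calls):
#   lowerCAmelCase(26) -> "0b11010"
#   lowerCAmelCase(-7) -> "-0b111"
#   lowerCAmelCase(0)  -> "0b0"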
| 628
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
A : str = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float , **__magic_name__ : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = kwargs.pop("padding_side" , "right" )
SCREAMING_SNAKE_CASE_ = kwargs.pop("return_attention_mask" , __magic_name__ )
super().__init__(**__magic_name__ )
def __A ( self : List[str] , __magic_name__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __magic_name__ : Union[bool, str, PaddingStrategy] = True , __magic_name__ : Optional[int] = None , __magic_name__ : bool = False , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
SCREAMING_SNAKE_CASE_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys() )}''' )
SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__magic_name__ ) == 0:
if return_attention_mask:
SCREAMING_SNAKE_CASE_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
SCREAMING_SNAKE_CASE_ = required_input[0]
if isinstance(__magic_name__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
SCREAMING_SNAKE_CASE_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = "tf"
elif is_torch_tensor(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = "pt"
elif isinstance(__magic_name__ , (int, float, list, tuple, np.ndarray) ):
SCREAMING_SNAKE_CASE_ = "np"
else:
raise ValueError(
F'''type of {first_element} unknown: {type(__magic_name__ )}. '''
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
SCREAMING_SNAKE_CASE_ = to_numpy(__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = [to_numpy(__magic_name__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
SCREAMING_SNAKE_CASE_ = self._get_padding_strategies(padding=__magic_name__ , max_length=__magic_name__ )
SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
if not all(len(__magic_name__ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
SCREAMING_SNAKE_CASE_ = []
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {k: v[i] for k, v in processed_features.items()}
# truncation
SCREAMING_SNAKE_CASE_ = self._truncate(
__magic_name__ , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , truncation=__magic_name__ , )
truncated_inputs.append(__magic_name__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
SCREAMING_SNAKE_CASE_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
SCREAMING_SNAKE_CASE_ = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE_ = {}
for i in range(__magic_name__ ):
# padding
SCREAMING_SNAKE_CASE_ = self._pad(
truncated_inputs[i] , max_length=__magic_name__ , padding_strategy=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , )
for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
return BatchFeature(__magic_name__ , tensor_type=__magic_name__ )
def __A ( self : Any , __magic_name__ : Union[Dict[str, np.ndarray], BatchFeature] , __magic_name__ : Optional[int] = None , __magic_name__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , ) -> dict:
SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__magic_name__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE_ = max_length - len(__magic_name__ )
if self.padding_side == "right":
if return_attention_mask:
SCREAMING_SNAKE_CASE_ = np.pad(
processed_features["attention_mask"] , (0, difference) )
SCREAMING_SNAKE_CASE_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
SCREAMING_SNAKE_CASE_ = np.pad(
__magic_name__ , __magic_name__ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
SCREAMING_SNAKE_CASE_ = np.pad(
processed_features["attention_mask"] , (difference, 0) )
SCREAMING_SNAKE_CASE_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
SCREAMING_SNAKE_CASE_ = np.pad(
__magic_name__ , __magic_name__ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def __A ( self : Optional[Any] , __magic_name__ : Union[Dict[str, np.ndarray], BatchFeature] , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , ) -> str:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE_ = len(__magic_name__ ) > max_length
if needs_to_be_truncated:
SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
SCREAMING_SNAKE_CASE_ = processed_features["attention_mask"][:max_length]
return processed_features
def __A ( self : Optional[int] , __magic_name__ : List[Any]=False , __magic_name__ : int=None ) -> Dict:
# Get padding strategy
if padding is not False:
if padding is True:
SCREAMING_SNAKE_CASE_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = PaddingStrategy(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = padding
else:
SCREAMING_SNAKE_CASE_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
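# A minimal sketch of driving the pad() path above with ragged inputs. The
# concrete extractor class and its constructor arguments are placeholders for
# any real subclass of this base class; the commented shape is what the
# "longest" strategy would produce for these lengths.
#
#   import numpy as np
#
#   features = {"input_values": [np.zeros(800), np.zeros(1_000), np.zeros(1_200)]}
#   extractor = SomeConcreteFeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#   batch = extractor.pad(features, padding="longest", return_tensors="np")
#   batch["input_values"].shape  # -> (3, 1200)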
| 356
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[Any] = logging.get_logger(__name__)
A : List[Any] = torch.device("cpu")
def a__ ( ):
SCREAMING_SNAKE_CASE_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
def a__ ( __UpperCamelCase ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = []
for k in state_dict.keys():
SCREAMING_SNAKE_CASE_ = k
if ".pwconv" in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
SCREAMING_SNAKE_CASE_ = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
SCREAMING_SNAKE_CASE_ = k_new.split("." )
if ls[2].isdigit():
SCREAMING_SNAKE_CASE_ = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
SCREAMING_SNAKE_CASE_ = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
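# Illustrative mapping produced by create_rename_keys for a hypothetical
# checkpoint key (traced through the rules above, not taken from a real
# checkpoint):
#   "network.0.1.pwconv1.weight"
#     -> "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight"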
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE_ = 1_0_0_0
SCREAMING_SNAKE_CASE_ = "huggingface/label-files"
SCREAMING_SNAKE_CASE_ = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4]
SCREAMING_SNAKE_CASE_ = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6]
SCREAMING_SNAKE_CASE_ = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
SCREAMING_SNAKE_CASE_ = [4, 3, 1_0, 5]
SCREAMING_SNAKE_CASE_ = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
SCREAMING_SNAKE_CASE_ = [4, 4, 1_2, 6]
SCREAMING_SNAKE_CASE_ = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location="cpu" , check_hash=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location="cpu" )
SCREAMING_SNAKE_CASE_ = checkpoint
SCREAMING_SNAKE_CASE_ = create_rename_keys(__UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(__UpperCamelCase ).eval()
hf_model.load_state_dict(__UpperCamelCase )
# prepare test inputs
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained("preprocessor_config" )
SCREAMING_SNAKE_CASE_ = processor(images=__UpperCamelCase , return_tensors="pt" )
# compare outputs from both models
SCREAMING_SNAKE_CASE_ = get_expected_output(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , __UpperCamelCase , atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
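# Usage sketch (illustrative; the script name and paths are hypothetical):
#   python convert_swiftformer_checkpoint.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/original_checkpoint.pth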
| 356
| 1
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
_a : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(_a , '''depth_multiplier''' ) )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=3_2 , _a=0.25 , _a=8 , _a=True , _a=1_0_2_4 , _a=3_2 , _a="relu6" , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=1_0 , _a=None , ) -> Any:
_a : List[str] = parent
_a : Tuple = batch_size
_a : int = num_channels
_a : Dict = image_size
_a : Any = depth_multiplier
_a : Tuple = min_depth
_a : List[Any] = tf_padding
_a : Optional[int] = int(last_hidden_size * depth_multiplier )
_a : List[str] = output_stride
_a : Dict = hidden_act
_a : Optional[int] = classifier_dropout_prob
_a : str = use_labels
_a : Optional[int] = is_training
_a : Dict = num_labels
_a : Union[str, Any] = initializer_range
_a : Optional[Any] = scope
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : List[Any] = None
_a : Any = None
if self.use_labels:
_a : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowercase ( self ) -> Union[str, Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __lowercase ( self , _a , _a , _a , _a ) -> Dict:
_a : Union[str, Any] = MobileNetVaModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowercase ( self , _a , _a , _a , _a ) -> Dict:
_a : Any = self.num_labels
_a : str = MobileNetVaForImageClassification(_a )
model.to(_a )
model.eval()
_a : int = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Dict:
_a : Dict = self.prepare_config_and_inputs()
_a , _a , _a , _a : Tuple = config_and_inputs
_a : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> str:
_a : Optional[Any] = MobileNetVaModelTester(self )
_a : List[str] = MobileNetVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __lowercase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __lowercase ( self ) -> Tuple:
pass
def __lowercase ( self ) -> List[str]:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Tuple:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> List[Any]:
def check_hidden_states_output(_a , _a , _a ):
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : int = model(**self._prepare_for_class(_a , _a ) )
_a : Union[str, Any] = outputs.hidden_states
_a : int = 2_6
self.assertEqual(len(_a ) , _a )
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Any = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> str:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Tuple = MobileNetVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> List[Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __lowercase ( self ) -> Tuple:
_a : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_a )
_a : Any = self.default_image_processor
_a : Dict = prepare_img()
_a : Union[str, Any] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Any = model(**_a )
# verify the logits
_a : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , _a )
_a : List[Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14
| 1
|
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )

    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform(s: str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )

    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string: str , idx_original_string: int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable"
            " to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )

    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg ).strip()
    result = bwt_transform(s )
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'" )
    original_string = reverse_bwt(result["bwt_string"] , result["idx_original_string"] )
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'" )
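# Worked example (illustrative): for s = "banana" the sorted rotations are
#   ['abanan', 'anaban', 'ananab', 'banana', 'nabana', 'nanaba']
# so bwt_transform("banana") returns
#   {"bwt_string": "nnbaaa", "idx_original_string": 3}
# and reverse_bwt("nnbaaa", 3) recovers "banana".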
| 478
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a__ ( unittest.TestCase ):
def UpperCAmelCase( self : int ):
a_ : Any = 0
def UpperCAmelCase( self : Dict ):
a_ : List[str] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Any = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : Tuple = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
a_ : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : List[Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Dict = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : List[str] = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
a_ : int = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : int = CLIPConfig()
# Create a dummy config file with image_proceesor_type
a_ : Tuple = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : Tuple = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a_ : Optional[int] = AutoImageProcessor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop("""image_processor_type""" )
a_ : Tuple = CLIPImageProcessor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
a_ : Optional[int] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
a_ : str = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Union[str, Any] = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
a_ : Optional[int] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : int ):
with self.assertRaisesRegex(
lowerCamelCase_ , """clip-base is not a local folder and is not a valid model identifier""" ):
a_ : Any = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCAmelCase( self : Optional[Any] ):
with self.assertRaisesRegex(
lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
a_ : Dict = AutoImageProcessor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def UpperCAmelCase( self : Dict ):
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
a_ : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase( self : str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase_ ):
a_ : int = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
a_ : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
a_ : int = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase_ )
a_ : int = AutoImageProcessor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def UpperCAmelCase( self : List[str] ):
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Dict = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : Union[str, Any] = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
a_ : str = CustomImageProcessor.from_pretrained(lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase_ )
a_ : Any = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase( self : Union[str, Any] ):
class NewImageProcessor(CLIPImageProcessor):
    is_local = True
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
# If remote code is not set, the default is to use local
a_ : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a_ : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a_ : Dict = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
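# A condensed registration sketch (hypothetical CustomConfig / CustomImageProcessor
# fixtures, mirroring the tests above): register the pair, save one instance, and
# resolve it back through the auto class.
def _registration_round_trip(tmp_dir, custom_config_cls, custom_image_processor_cls):
    AutoConfig.register("custom", custom_config_cls)
    AutoImageProcessor.register(custom_config_cls, custom_image_processor_cls)
    custom_image_processor_cls().save_pretrained(tmp_dir)
    return AutoImageProcessor.from_pretrained(tmp_dir)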
| 478
| 1
|
"""
Build and simulate a quantum full adder: two input bits plus a carry-in are
added on a 4-qubit circuit, and the sum and carry-out qubits are measured.
"""
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # a 2 means "superposition": apply a Hadamard
        elif entry[i] == 1:
            quantum_circuit.x(i)  # flip the qubit to |1>
        elif entry[i] == 0:
            quantum_circuit.i(i)  # identity gate, leave the qubit at |0>

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = Toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the sum and carry-out qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 207
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features for one example."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: "tf.data.Dataset"

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts HANS examples into model-ready ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
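# A minimal usage sketch (hypothetical local path: assumes the HANS
# heuristics_*.txt files have been downloaded into ./hans-data and that torch
# and a tokenizer are available):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset("./hans-data", tokenizer, task="hans", max_seq_length=128)
    print(len(dataset), dataset.get_labels())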
| 718
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top
    of the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
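# A short usage sketch (the base checkpoint only, so the internal classifiers
# start randomly initialized; `set_patience` controls how many consecutive
# agreeing classifiers trigger an early exit at inference):
if __name__ == "__main__":
    model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
    model.bert.set_patience(3)
    model.eval()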
| 474
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 201
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=1_6 , lowerCamelCase_=3_6 , lowerCamelCase_=6 , lowerCamelCase_=6 , lowerCamelCase_=6 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> Dict:
_a : Dict = parent
_a : Tuple = batch_size
_a : Optional[int] = seq_length
_a : List[str] = is_training
_a : Tuple = use_input_mask
_a : Dict = use_token_type_ids
_a : List[str] = use_labels
_a : Optional[int] = vocab_size
_a : int = embedding_size
_a : Tuple = hidden_size
_a : str = num_hidden_layers
_a : List[str] = num_hidden_groups
_a : Union[str, Any] = num_attention_heads
_a : Dict = intermediate_size
_a : List[str] = hidden_act
_a : Tuple = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : Dict = type_vocab_size
_a : Tuple = type_sequence_label_size
_a : Dict = initializer_range
_a : str = num_labels
_a : Optional[Any] = num_choices
_a : Optional[int] = scope
def __UpperCamelCase ( self ) -> Optional[int]:
_a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : List[str] = None
if self.use_input_mask:
_a : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_a : Dict = None
if self.use_token_type_ids:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : List[str] = None
_a : Optional[int] = None
_a : List[Any] = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : str = ids_tensor([self.batch_size] , self.num_choices )
_a : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ) -> Any:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_a : str = AlbertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
_a : Dict = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
_a : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_a : Optional[int] = AlbertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : Optional[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , sentence_order_label=lowerCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_a : Union[str, Any] = AlbertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_a : Union[str, Any] = AlbertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : Tuple = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_a : str = self.num_labels
_a : str = AlbertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_a : Optional[int] = self.num_labels
_a : Any = AlbertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : List[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_a : Optional[int] = self.num_choices
_a : Any = AlbertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Optional[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : str = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : List[Any] = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : Optional[int] = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def __UpperCamelCase ( self ) -> List[Any]:
_a : int = AlbertModelTester(self )
_a : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7 )
def __UpperCamelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> Optional[int]:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __UpperCamelCase ( self ) -> Tuple:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
def __UpperCamelCase ( self ) -> Any:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __UpperCamelCase ( self ) -> Tuple:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __UpperCamelCase ( self ) -> Dict:
_a : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Optional[Any] = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@slow
def __UpperCamelCase ( self ) -> Optional[Any]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : int = AlbertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class a ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self ) -> Union[str, Any]:
_a : Dict = AlbertModel.from_pretrained('albert-base-v2' )
_a : Optional[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_a : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
_a : str = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , lowerCamelCase_ )
_a : int = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4 ) )
| 120
| 0
|
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Gaussian Error Linear Unit, the exact erf-based formulation
    (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh-based GELU approximation, as used in the original BERT repo."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves along `axis` and gate
    the first half with the sigmoid of the second (https://arxiv.org/abs/1612.08083)."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 698
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPT-Neo model."""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the ONNX export."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-length helper to enable the ONNX export."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
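# A quick illustration (a sketch, assuming transformers is installed): the
# default `attention_types` value [[["global", "local"], 12]] expands into 24
# alternating layer types, matching the default `num_layers=24`.
if __name__ == "__main__":
    config = GPTNeoConfig()
    print(config.attention_layers[:4])  # ['global', 'local', 'global', 'local']
    print(len(config.attention_layers) == config.num_layers)  # True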
| 698
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
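# A small usage sketch (a guess at typical arguments; assumes torch is
# installed): construct the args and query the derived device properties.
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
    print(args.device, args.n_gpu, args.is_gpu)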
| 64
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho), with bulk modulus K in Pa and
    density rho in kg/m^3, giving the speed of sound in m/s."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
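# A worked example (approximate reference values, assumed here: water near room
# temperature has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa), giving
# sqrt(2.15e9 / 998) ≈ 1468 m/s:
if __name__ == "__main__":
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))  # ~1467.7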
| 64
| 1
|
"""
Compute the sum of the decimal digits of 2**power (Project Euler problem 16:
https://projecteuler.net/problem=16).
"""


def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
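# A quick self-check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26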
| 195
|
"""
A greedy approximation for the knapsack problem: sort the items by a chosen key
and take them while the weight budget allows.
"""


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
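# A small usage sketch (hypothetical menu data): greedily fill a weight budget
# of 60 using the value-to-weight ratio as the sort key; only the burger fits.
if __name__ == "__main__":
    food = ["Burger", "Pizza", "Coca Cola"]
    value = [80, 100, 60]
    weight = [40, 60, 40]
    foods = build_menu(food, value, weight)
    print(greedy(foods, 60, Things.value_weight))  # ([Things(Burger, 80, 40)], 80.0)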
| 195
| 1
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 624
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 624
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
snake_case__ : List[str] = IFImgaImgSuperResolutionPipeline
snake_case__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
snake_case__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
snake_case__ : Any = PipelineTesterMixin.required_optional_params - {'latents'}
def _UpperCamelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def _UpperCamelCase ( self :int , __magic_name__ :Optional[int] , __magic_name__ :str=0 ) -> int:
'''simple docstring'''
if str(__magic_name__ ).startswith('''mps''' ):
a__ = torch.manual_seed(__magic_name__ )
else:
a__ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
a__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
a__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _UpperCamelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _UpperCamelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _UpperCamelCase ( self :Any ) -> List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _UpperCamelCase ( self :List[str] ) -> Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _UpperCamelCase ( self :Any ) -> Tuple:
'''simple docstring'''
self._test_save_load_local()
def _UpperCamelCase ( self :str ) -> Tuple:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 703
|
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features_order(jsonl_312_path, tmp_path):
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
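

# Illustrative round-trip sketch (not one of the original tests): write a small
# in-memory dataset to a JSON-lines buffer and read the rows back.
def _roundtrip_example():
    data = {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
    ds = Dataset.from_dict(data)
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        rows = [json.loads(line) for line in buffer]
    assert rows[0] == {"col_1": "a", "col_2": 1, "col_3": 1.0}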
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
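

# Cross-check sketch (illustrative, not part of the original solution): by
# Binet's formula F(i) is approximately phi**i / sqrt(5), so F(i) first reaches
# n digits at roughly the smallest i with i*log10(phi) - log10(5)/2 >= n - 1.
def fibonacci_digits_index_closed_form(n: int) -> int:
    from math import ceil, log10, sqrt

    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(5) / 2) / log10(phi))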
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers=0,
        crop_overlap_ratio=512 / 1500,
        points_per_crop=32,
        crop_n_points_downscale_factor=1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
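

if __name__ == "__main__":
    # Small usage sketch (added for illustration; not part of the original module).
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)
    print("nodes:", dg.all_nodes())
    print("dfs from 0:", dg.dfs(0))
    print("has cycle:", dg.has_cycle())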
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
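

if __name__ == "__main__":
    # Round-trip sketch (illustrative, not in the original file). Encoding
    # lowercases, drops spaces and folds "j" into "i", so only messages already
    # in that form decode back unchanged.
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"
    print(cipher.encode("testmessage"))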
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of the two entries of sorted `nums` that sum to `target`."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
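

# Sketch of the standard O(n) hash-map alternative for *unsorted* input
# (added for comparison; the two-pointer version above assumes `nums` is sorted):
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for i, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []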
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 11, 15], 9) = }")
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
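

# Worked example (illustrative): with a 10% discount rate and cash flows
# [-1000, 300, 400, 500], the discounted sum is
# -1000 + 300/1.1 + 400/1.1**2 + 500/1.1**3 ~= -21.0368, so:
#
#     assert present_value(0.10, [-1000, 300, 400, 500]) == -21.04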
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
                ' pip install sentencepiece'
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
                ' pip install sentencepiece'
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
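
    # Cross-check sketch (illustrative): agree with naive trial division on a
    # small range; with prec=1000 rounds the probabilistic test is effectively
    # exact here.
    def _is_prime_naive(k: int) -> bool:
        return k >= 2 and all(k % f for f in range(2, int(k**0.5) + 1))

    assert all(is_prime_big(i) == _is_prime_naive(i) for i in range(2, 200))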
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
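

# Illustrative alternative (not in the original file): an index-based,
# iterative version that avoids the O(n) slice copies of the recursive one.
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False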
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5_000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
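
# Example invocation (illustrative; the script name and paths are placeholders):
#
#     python convert_ldm_original.py \
#         --checkpoint_path model.ckpt \
#         --config_path config.yaml \
#         --output_path ./ldm-pipeline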
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Calculate intersection and union between a predicted and a ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Accumulate intersection and union over a batch of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Compute mean IoU, mean accuracy, overall accuracy, and the per-category metrics."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
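# --- Usage sketch (editor's addition, not part of the original module) ---
# Drives mean_iou() above directly on two tiny 2x2 maps; the numbers are
# illustrative only, and only NumPy (already imported above) is needed.
if __name__ == "__main__":
    _pred = np.array([[0, 1], [1, 1]])
    _gt = np.array([[0, 1], [0, 1]])
    _res = mean_iou([_pred], [_gt], num_labels=2, ignore_index=255)
    print(_res["mean_iou"], _res["per_category_iou"])  # ~0.583 [0.5, 0.667]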
| 123
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class for GPTBigCode models (e.g. bigcode/gpt_bigcode-santacoder)."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None,
        activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
        layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
        bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
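# --- Usage sketch (editor's addition, not part of the original module) ---
# Shows the attribute_map indirection: `hidden_size` reads through to `n_embd`
# and `num_hidden_layers` to `n_layer`.
if __name__ == "__main__":
    _cfg = GPTBigCodeConfig(n_embd=512, n_layer=6)
    print(_cfg.hidden_size, _cfg.num_hidden_layers)  # 512 6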
| 123
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(snake_case_ , 'feature_size' ) )
self.assertTrue(hasattr(snake_case_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(snake_case_ , 'hop_length' ) )
self.assertTrue(hasattr(snake_case_ , 'chunk_length' ) )
self.assertTrue(hasattr(snake_case_ , 'sampling_rate' ) )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = feat_extract_first.save_pretrained(snake_case_ )[0]
check_json_file_has_correct_format(snake_case_ )
_A = self.feature_extraction_class.from_pretrained(snake_case_ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = dict_first.pop('mel_filters' )
_A = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(snake_case_ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case_ )
_A = self.feature_extraction_class.from_json_file(snake_case_ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = dict_first.pop('mel_filters' )
_A = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
# Initialize feature_extractor
_A = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test not batched input
_A = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A = feature_extractor(
snake_case_ , return_tensors='np' , sampling_rate=4_4100 , mask_audio=snake_case_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A = np.asarray(snake_case_ )
_A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
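# --- Usage sketch (editor's addition, not part of the test file) ---
# Runs the extractor on one second of low-amplitude noise; a single 1-D array
# is accepted, as exercised by the non-batched test above.
if __name__ == "__main__":
    _fe = TvltFeatureExtractor()
    _audio = (np.random.RandomState(0).randn(44100) * 0.1).astype(np.float32)
    _out = _fe(_audio, return_tensors="np", sampling_rate=44100)
    print(_out.audio_values.shape)  # (1, num_audio_channels, time, feature_size)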
| 27
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer backed by HuggingFace tokenizers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
        keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
        pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"], **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
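# --- Usage sketch (editor's addition, not part of the original module) ---
# XLNet appends <sep>/<cls> instead of prepending them, which is why the pair
# encoding below ends with segment id 2 (requires the checkpoint to be downloadable).
if __name__ == "__main__":
    tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    encoded = tokenizer("Hello", "world")
    print(encoded["input_ids"], encoded["token_type_ids"])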
| 587
| 0
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence` in place using the (intentionally inefficient) slowsort algorithm."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
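# --- Usage sketch (editor's addition, not part of the original script) ---
# slowsort mutates the list in place; it is a deliberately wasteful
# "multiply and surrender" teaching algorithm, so keep inputs small.
if __name__ == "__main__":
    data = [5, 2, 9, 1]
    slowsort(data)
    print(data)  # [1, 2, 5, 9]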
| 703
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Returns True if the string is a dotted-quad IPv4 address with octets in [0, 254]."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
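# --- Usage sketch (editor's addition, not part of the original script) ---
# Note the validator above caps octets at 254, so 255.255.255.255 is rejected.
if __name__ == "__main__":
    assert is_ip_v4_address_valid("192.168.0.23")
    assert not is_ip_v4_address_valid("192.256.15.8")
    assert not is_ip_v4_address_valid("bad.input.here")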
| 196
| 0
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 33
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Lists (old, new) key pairs for renaming timm DeiT weights to the HF format."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Splits each fused timm qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Downloads the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HF format."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int((256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
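# --- Usage sketch (editor's addition, not part of the original script) ---
# After conversion, the dump folder loads like any other checkpoint
# (folder name illustrative):
#
#   from transformers import DeiTForImageClassificationWithTeacher
#   model = DeiTForImageClassificationWithTeacher.from_pretrained("./deit_dump")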
| 500
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
'''simple docstring'''
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Optional[int]=3 , __lowercase : Any=32 , __lowercase : List[str]=3 , __lowercase : Tuple=10 , __lowercase : Dict=[8, 16, 32, 64] , __lowercase : int=[1, 1, 2, 1] , __lowercase : Any=True , __lowercase : Dict=True , __lowercase : Optional[int]="relu" , __lowercase : Any=3 , __lowercase : int=None , __lowercase : Tuple=["stage2", "stage3", "stage4"] , __lowercase : List[str]=[2, 3, 4] , __lowercase : Tuple=1 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = embeddings_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = len(__lowercase )
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = num_groups
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : str ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case__ ( self : Dict , __lowercase : Dict , __lowercase : List[Any] , __lowercase : int ):
"""simple docstring"""
snake_case_ = BitModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case__ ( self : Tuple , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : Tuple ):
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = BitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ):
"""simple docstring"""
snake_case_ = BitBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = BitBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions" )
def snake_case__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def snake_case__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__lowercase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowercase )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(config=__lowercase )
for name, module in model.named_modules():
if isinstance(__lowercase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(__lowercase : int , __lowercase : Optional[int] , __lowercase : Union[str, Any] ):
snake_case_ = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__lowercase , __lowercase ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case_ = layer_type
snake_case_ = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def snake_case__ ( self : str ):
"""simple docstring"""
pass
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def snake_case__ ( self : Dict ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
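# --- Usage sketch (editor's addition, not part of the test file) ---
# The integration test above resolves BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0];
# done by hand that is equivalent to (checkpoint download required):
#
#   model = BitForImageClassification.from_pretrained("google/bit-50")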
| 139
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Lists (old, new) key pairs for renaming timm ViT-hybrid weights to the HF format."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Splits each fused timm qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Downloads the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm ViT-hybrid checkpoint into the HF format."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
lowercase__ : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
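# --- Usage sketch (editor's addition, not part of the original script) ---
# Typical invocation (paths illustrative):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit_hybrid_dump --push_to_hub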
| 139
| 1
|
from __future__ import annotations
class BoyerMooreSearch:
    """Bad-character-heuristic variant of Boyer-Moore string search."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Text index of the rightmost mismatch for the window at current_pos, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
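# --- Editor's note (addition) ---
# The demo above prints [0, 3] for pattern "AB" in "ABAABA". Note that the shift
# computed in bad_character_heuristic is effectively discarded: rebinding the
# loop variable `i` does not change the iteration of `range`, which is why the
# original source carried the `lgtm [py/multiple-definition]` suppression, and
# the search degrades to checking every window, i.e. O(text_len * pat_len).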
| 1
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
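# --- Usage sketch (editor's addition, not part of the original module) ---
# Processors are keyed by task name; e.g. for MRPC (data path illustrative):
#
#   processor = glue_processors["mrpc"]()
#   examples = processor.get_train_examples("/path/to/MRPC")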
| 699
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds a root of `function` in [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
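# --- Usage sketch (editor's addition, not part of the original script) ---
# bisection() requires f(a) and f(b) to bracket a root; otherwise it raises.
if __name__ == "__main__":
    try:
        bisection(f, 3, 1000)  # f(3) and f(1000) are both positive
    except ValueError as err:
        print(err)  # could not find root in given interval.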
| 97
|
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    """Square root approximated by Newton's method on f(x) = x^2 - a."""
    if a < 0:
        raise ValueError('math domain error')
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
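# --- Usage sketch (editor's addition, not part of the original script) ---
# Newton's method converges quadratically, so a handful of iterations suffice.
if __name__ == "__main__":
    print(square_root_iterative(4))    # ~2.0
    print(square_root_iterative(3.2))  # ~1.78885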
| 97
| 1
|