| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 613
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
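For reference, a quick usage sketch of the helper above (the name `ind_reactance` is this edit's reconstruction of the obfuscated definition). With the reactance argument zeroed, the function solves X_L = 2·pi·f·L for the missing quantity:

```python
# Minimal sanity check, assuming the reconstructed name `ind_reactance`.
print(ind_reactance(35e-3, 1000, 0))
# {'reactance': 219.911...}  i.e. 2 * pi * 1000 * 0.035
print(ind_reactance(0, 1000, 219.911))
# {'inductance': 0.0349...}  i.e. X_L / (2 * pi * f)
```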
| 528
| 0
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    """Check that the installed version of `pkg` satisfies the pin in the dependency table."""
    require_version(deps[pkg], hint)
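A usage sketch for the helper above (the name `dep_version_check` follows the transformers convention; `"tqdm"` is one of the packages pinned in the table):

```python
# Raises if the installed tqdm does not satisfy the pin recorded in `deps`.
dep_version_check("tqdm")
```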
| 702
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Dummy tool used by the interpreter tests below."""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
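Outside the test harness, the contract these tests pin down can be exercised directly; a minimal sketch (assuming `evaluate` accepts any callable as a tool, as the tests suggest):

```python
state = {}
result = evaluate("x = 3\ny = add_two(x)", {"add_two": lambda v: v + 2}, state=state)
print(result)  # 5, the value of the last assignment
print(state)   # {'x': 3, 'y': 5}
```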
| 229
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
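A minimal usage sketch for this pipeline through the high-level `pipeline` factory (the checkpoint and the printed scores are illustrative):

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
preds = classifier("cat.jpg", top_k=2)
# e.g. [{'score': 0.98, 'label': 'tabby, tabby cat'}, {'score': 0.01, 'label': 'tiger cat'}]
```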
| 557
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 557
| 1
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encodings_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # An error should be raised when trying to load an old, incompatible serialization format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lowercases letters
        pass
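As a quick illustration of the BPE behavior asserted in `test_full_tokenizer` above (a sketch, with `vocab_file`/`merges_file` assumed to point at the toy files written in `setUp`):

```python
tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
print(tokenizer.tokenize("lower newer"))
# ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
```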
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
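A minimal instantiation sketch for the config above; the derived attributes follow directly from the constructor logic:

```python
config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(config.hidden_size)   # 96 * 2**(4 - 1) = 768
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```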
| 33
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
|
"""simple docstring"""
def equation(x: float) -> float:
    """Function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b] by repeated interval halving."""
    # By the intermediate value theorem, a sign change is required for a root
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
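Both driver calls bracket the positive root of f(x) = 10 - x^2, so they converge to sqrt(10); a quick check:

```python
from math import sqrt

print(sqrt(10))         # 3.1622776601683795
print(bisection(0, 6))  # ~3.16, within the 0.01 interval tolerance of the loop
```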
| 657
| 0
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the running environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Resolve `namespace/model_id`, using the token's user when no organization is given."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename pointing into the cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert `variant` before the file extension, e.g. model.bin -> model.fp16.bin."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
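Concretely, a quick check of the splice performed above:

```python
print(_add_variant("diffusion_pytorch_model.bin", "fp16"))
# diffusion_pytorch_model.fp16.bin
print(_add_variant("diffusion_pytorch_model.bin"))
# diffusion_pytorch_model.bin (unchanged when no variant is given)
```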
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 601
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of n (all divisors of n below n)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Project Euler 21: sum of all amicable numbers below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
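For intuition, 220 and 284 form the classic amicable pair counted by `solution`: each is the sum of the other's proper divisors, while perfect numbers (where `sum_of_divisors(i) == i`) are explicitly excluded:

```python
assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110
assert sum_of_divisors(284) == 220  # 1+2+4+71+142
```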
| 601
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 467
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class UpperCAmelCase ( _snake_case ):
model_type = "segformer"
def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=2_5_6 , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
super().__init__(**kwargs )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , FutureWarning , )
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.depths = depths
self.sr_ratios = sr_ratios
self.hidden_sizes = hidden_sizes
self.patch_sizes = patch_sizes
self.strides = strides
self.mlp_ratios = mlp_ratios
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.drop_path_rate = drop_path_rate
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.reshape_last_stage = kwargs.get('''reshape_last_stage''' , True )
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase ( _snake_case ):
torch_onnx_minimum_version = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
return 1_2
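# --- Editor's note: a minimal sketch of driving the ONNX config above (assumes
# `transformers` is installed; the import path reflects the usual module layout):
# from transformers import SegformerConfig
# from transformers.models.segformer.configuration_segformer import SegformerOnnxConfig
# onnx_config = SegformerOnnxConfig(SegformerConfig())
# print(onnx_config.inputs)               # pixel_values with dynamic batch/height/width axes
# print(onnx_config.atol_for_validation)  # 1e-4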
| 467
| 1
|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __a ):
scheduler_classes = (DDIMParallelScheduler,)
forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def get_scheduler_config( self , **kwargs ):
config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**kwargs )
return config
def full_loop( self , **kwargs ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**kwargs )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps , eta = 10, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for t in scheduler.timesteps:
residual = model(sample , t )
sample = scheduler.step(residual , t , sample , eta ).prev_sample
return sample
def snake_case__ ( self : List[Any] ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps )
def snake_case__ ( self : str ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset )
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1 )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def snake_case__ ( self : int ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def snake_case__ ( self : Tuple ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def snake_case__ ( self : Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def snake_case__ ( self : Union[str, Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def snake_case__ ( self : Dict ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing )
def snake_case__ ( self : List[Any] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
def snake_case__ ( self : Optional[Any] ):
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def snake_case__ ( self : List[str] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t )
def snake_case__ ( self : List[Any] ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
def snake_case__ ( self : Optional[Any] ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=t , eta=eta )
def snake_case__ ( self : Tuple ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def snake_case__ ( self : str ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_inference_steps , eta = 10, 0.0
scheduler.set_timesteps(num_inference_steps )
model = self.dummy_model()
sample1 = self.dummy_sample_deter
sample2 = self.dummy_sample_deter + 0.1
sample3 = self.dummy_sample_deter - 0.1
per_sample_batch = sample1.shape[0]
samples = torch.stack([sample1, sample2, sample3] , dim=0 )
timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
result_sum = torch.sum(torch.abs(pred_prev_sample ) )
result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2
assert abs(result_mean.item() - 0.4_982 ) < 1E-3
def snake_case__ ( self : Optional[Any] ):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 172.0_067 ) < 1E-2
assert abs(result_mean.item() - 0.223_967 ) < 1E-3
def snake_case__ ( self : str ):
sample = self.full_loop(prediction_type='''v_prediction''' )
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 52.5_302 ) < 1E-2
assert abs(result_mean.item() - 0.0_684 ) < 1E-3
def snake_case__ ( self : int ):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.8_295 ) < 1E-2
assert abs(result_mean.item() - 0.1_951 ) < 1E-3
def snake_case__ ( self : Optional[Any] ):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0_784 ) < 1E-2
assert abs(result_mean.item() - 0.1_941 ) < 1E-3
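# --- Editor's note: a minimal denoising-loop sketch with the scheduler under test
# (assumes `diffusers` and `torch` are installed; the identity "model" is a stand-in
# for a trained UNet, so the output is noise, not an image):
# import torch
# from diffusers import DDIMParallelScheduler
# scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8)
# for t in scheduler.timesteps:
#     noise_pred = sample  # stand-in for model(sample, t)
#     sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample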
| 245
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _SCREAMING_SNAKE_CASE ( __a ):
model_type = """biogpt"""
def __init__( self , vocab_size=4_2384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1E-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = scale_embedding
__magic_name__ = use_cache
__magic_name__ = layerdrop
__magic_name__ = activation_dropout
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
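# --- Editor's note: a minimal usage sketch for the config above (assumes
# `transformers` is installed; the small sizes are illustrative, not the defaults):
# from transformers import BioGptConfig, BioGptModel
# config = BioGptConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
# model = BioGptModel(config)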
| 245
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__lowerCamelCase = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
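# --- Editor's note: `_LazyModule` defers the submodule imports declared in
# `_import_structure` until first attribute access. A rough sketch of the same idea
# with PEP 562 (`_name_to_submodule` is a hypothetical mapping, e.g. "OnnxConfig" -> "config"):
# import importlib
# def __getattr__(name):
#     return getattr(importlib.import_module("." + _name_to_submodule[name], __name__), name)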
| 96
|
"""simple docstring"""
from __future__ import annotations
def lowercase ( UpperCamelCase : list[float] ):
"""simple docstring"""
if len(UpperCamelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A__ : Union[str, Any] =nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656
| 0
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a__ : Tuple = None
a__ : Tuple = logging.get_logger(__name__)
a__ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a__ : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
vocab_files_names =VOCAB_FILES_NAMES
pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names =["input_ids", "attention_mask"]
slow_tokenizer_class =TaTokenizer
prefix_tokens =[]
def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
self._extra_ids = extra_ids
@staticmethod
def __snake_case ( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , a__ , )
return max_model_length
def __snake_case ( self , save_directory , filename_prefix = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(save_directory ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def __snake_case ( self , token_ids_a , token_ids_b = None ):
token_ids_a = token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return self.prefix_tokens + token_ids_a
else:
token_ids_b = token_ids_b + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_b
def __snake_case ( self , token_ids_a , token_ids_b = None ):
eos = [self.eos_token_id]
if token_ids_b is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def __snake_case ( self : List[Any] ):
return list(
set(filter(lambda a__ : bool(re.search(R'''<extra_id_\d+>''' , a__ ) ) is not None , self.additional_special_tokens ) ) )
def __snake_case ( self : Union[str, Any] ):
return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
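# --- Editor's note: the two helpers above enumerate T5's sentinel tokens
# (<extra_id_0>, <extra_id_1>, ...) and their ids; sentinels mark masked spans in
# T5's span-corruption pretraining objective.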
| 570
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
a__ : Tuple = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
a__ : List[str] = 'UperNetConfig'
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , a__ : int , a__ : int , a__ : Union[int, Tuple[int, int]] , a__ : Union[int, Tuple[int, int], str] = 0 , a__ : bool = False , a__ : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
UpperCAmelCase = nn.Convad(
in_channels=a__ , out_channels=a__ , kernel_size=a__ , padding=a__ , bias=a__ , dilation=a__ , )
UpperCAmelCase = nn.BatchNormad(a__ )
UpperCAmelCase = nn.ReLU()
def __snake_case ( self : Optional[int] , a__ : torch.Tensor ):
UpperCAmelCase = self.conv(a__ )
UpperCAmelCase = self.batch_norm(a__ )
UpperCAmelCase = self.activation(a__ )
return output
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , a__ : int , a__ : int , a__ : int ):
super().__init__()
UpperCAmelCase = [
nn.AdaptiveAvgPoolad(a__ ),
UperNetConvModule(a__ , a__ , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(a__ ) , a__ )
def __snake_case ( self : Dict , a__ : torch.Tensor ):
UpperCAmelCase = input
for layer in self.layers:
UpperCAmelCase = layer(a__ )
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , a__ : Tuple[int, ...] , a__ : int , a__ : int , a__ : bool ):
super().__init__()
UpperCAmelCase = pool_scales
UpperCAmelCase = align_corners
UpperCAmelCase = in_channels
UpperCAmelCase = channels
UpperCAmelCase = []
for i, pool_scale in enumerate(a__ ):
UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=a__ , in_channels=a__ , channels=a__ )
self.blocks.append(a__ )
self.add_module(str(a__ ) , a__ )
def __snake_case ( self : str , a__ : torch.Tensor ):
UpperCAmelCase = []
for ppm in self.blocks:
UpperCAmelCase = ppm(a__ )
UpperCAmelCase = nn.functional.interpolate(
a__ , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(a__ )
return ppm_outs
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , a__ : Dict , a__ : int ):
super().__init__()
UpperCAmelCase = config
UpperCAmelCase = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCAmelCase = in_channels
UpperCAmelCase = config.hidden_size
UpperCAmelCase = False
UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
UpperCAmelCase = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCAmelCase = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCAmelCase = nn.ModuleList()
UpperCAmelCase = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCAmelCase = UperNetConvModule(a__ , self.channels , kernel_size=1 )
UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(a__ )
self.fpn_convs.append(a__ )
UpperCAmelCase = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __snake_case ( self : List[str] ):
self.apply(self._init_weights )
def __snake_case ( self : Tuple , a__ : Dict ):
if isinstance(a__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __snake_case ( self : List[str] , a__ : Optional[Any] ):
UpperCAmelCase = inputs[-1]
UpperCAmelCase = [x]
psp_outs.extend(self.psp_modules(a__ ) )
UpperCAmelCase = torch.cat(a__ , dim=1 )
UpperCAmelCase = self.bottleneck(a__ )
return output
def __snake_case ( self : Tuple , a__ : torch.Tensor ):
# build laterals
UpperCAmelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(a__ ) )
# build top-down path
UpperCAmelCase = len(a__ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCAmelCase = laterals[i - 1].shape[2:]
UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=a__ , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
UpperCAmelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCAmelCase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
UpperCAmelCase = torch.cat(a__ , dim=1 )
UpperCAmelCase = self.fpn_bottleneck(a__ )
UpperCAmelCase = self.classifier(a__ )
return output
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , a__ : Any , a__ : int = 2 , a__ : int = 3 , a__ : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
UpperCAmelCase = config
UpperCAmelCase = config.auxiliary_in_channels
UpperCAmelCase = config.auxiliary_channels
UpperCAmelCase = config.auxiliary_num_convs
UpperCAmelCase = config.auxiliary_concat_input
UpperCAmelCase = in_index
UpperCAmelCase = (kernel_size // 2) * dilation
UpperCAmelCase = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=a__ , padding=a__ , dilation=a__ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=a__ , padding=a__ , dilation=a__ ) )
if self.num_convs == 0:
UpperCAmelCase = nn.Identity()
else:
UpperCAmelCase = nn.Sequential(*a__ )
if self.concat_input:
UpperCAmelCase = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=a__ , padding=kernel_size // 2 )
UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def __snake_case ( self : List[str] ):
self.apply(self._init_weights )
def __snake_case ( self : Union[str, Any] , a__ : Optional[Any] ):
if isinstance(a__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __snake_case ( self : Any , a__ : torch.Tensor ):
# just take the relevant feature maps
UpperCAmelCase = encoder_hidden_states[self.in_index]
UpperCAmelCase = self.convs(a__ )
if self.concat_input:
UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
UpperCAmelCase = self.classifier(a__ )
return output
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =UperNetConfig
_lowerCamelCase ="pixel_values"
_lowerCamelCase =True
def __snake_case ( self : Dict , a__ : List[str] ):
if isinstance(a__ , a__ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __snake_case ( self : Any ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __snake_case ( self : Union[str, Any] , a__ : Tuple , a__ : Optional[Any]=False ):
if isinstance(a__ , a__ ):
UpperCAmelCase = value
a__ : Union[str, Any] = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
a__ : Union[str, Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , UpperCAmelCase_ , )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , a__ : int ):
super().__init__(a__ )
UpperCAmelCase = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCAmelCase = UperNetHead(a__ , in_channels=self.backbone.channels )
UpperCAmelCase = UperNetFCNHead(a__ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=a__ , config_class=_CONFIG_FOR_DOC )
def __snake_case ( self : Tuple , a__ : Optional[torch.Tensor] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , a__ : Optional[torch.Tensor] = None , a__ : Optional[bool] = None , ):
UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCAmelCase = self.backbone.forward_with_filtered_kwargs(
a__ , output_hidden_states=a__ , output_attentions=a__ )
UpperCAmelCase = outputs.feature_maps
UpperCAmelCase = self.decode_head(a__ )
UpperCAmelCase = nn.functional.interpolate(a__ , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=a__ )
UpperCAmelCase = None
if self.auxiliary_head is not None:
UpperCAmelCase = self.auxiliary_head(a__ )
UpperCAmelCase = nn.functional.interpolate(
a__ , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=a__ )
UpperCAmelCase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCAmelCase = loss_fct(a__ , a__ )
UpperCAmelCase = loss_fct(a__ , a__ )
UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCAmelCase = (logits,) + outputs[1:]
else:
UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=a__ , logits=a__ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 570
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __magic_name__ ( _a , _a):
order : int = 1
@register_to_config
def __init__( self ,num_train_timesteps=2_0_0_0 ,beta_min=0.1 ,beta_max=2_0 ,sampling_eps=1e-3 ):
self.sigmas = None
self.discrete_sigmas = None
self.timesteps = None
def _UpperCAmelCase ( self ,num_inference_steps ,device: Union[str, torch.device] = None ):
self.timesteps = torch.linspace(1 ,self.config.sampling_eps ,num_inference_steps ,device=device )
def _UpperCAmelCase ( self ,score ,x ,t ,generator=None ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
log_mean_coeff = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
std = std.flatten()
while len(std.shape ) < len(score.shape ):
std = std.unsqueeze(-1 )
score = -score / std
# compute
dt = -1.0 / len(self.timesteps )
beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
beta_t = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
beta_t = beta_t.unsqueeze(-1 )
drift = -0.5 * beta_t * x
diffusion = torch.sqrt(beta_t )
drift = drift - diffusion**2 * score
x_mean = x + drift * dt
# add noise
noise = randn_tensor(x.shape ,layout=x.layout ,generator=generator ,device=x.device ,dtype=x.dtype )
x = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Optional[Any] ):
return self.config.num_train_timesteps
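# --- Editor's note: the step above is one Euler-Maruyama step of the reverse-time
# VP-SDE, x <- x + [f(x, t) - g(t)^2 * score] * dt + g(t) * sqrt(|dt|) * z, with drift
# f(x, t) = -0.5 * beta(t) * x and diffusion g(t) = sqrt(beta(t)); dt < 0 because
# sampling integrates from t = 1 down to `sampling_eps`.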
| 333
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__lowerCAmelCase ={
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
__lowerCAmelCase =AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap( checkpoint_path , enable_fusion=False ):
"""simple docstring"""
model , model_cfg = create_model(
"HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def rename_state_dict( state_dict ):
"""simple docstring"""
model_state_dict = {}
sequential_layers_pattern = R".*sequential.(\d+).*"
text_projection_pattern = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
key = key.replace(key_to_modify , new_key )
if re.match(sequential_layers_pattern , key ):
# replace sequential layers with list
sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
key = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer )//3}.linear.''' )
elif re.match(text_projection_pattern , key ):
projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
transformers_projection_layer = 1 if projection_layer == 0 else 2
key = key.replace(F'''_projection.{projection_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" in key and "qkv" in key:
# split qkv into query key and value
mixed_qkv = value
qkv_dim = mixed_qkv.size(0 ) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
model_state_dict[key.replace("qkv" , "query" )] = query_layer
model_state_dict[key.replace("qkv" , "key" )] = key_layer
model_state_dict[key.replace("qkv" , "value" )] = value_layer
else:
model_state_dict[key] = value
return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
"""simple docstring"""
clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
clap_model.eval()
state_dict = clap_model.state_dict()
state_dict = rename_state_dict(state_dict )
transformers_config = ClapConfig()
transformers_config.audio_config.enable_fusion = enable_fusion
model = ClapModel(transformers_config )
# ignore the spectrogram embedding layer
model.load_state_dict(state_dict , strict=False )
model.save_pretrained(pytorch_dump_folder_path )
transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
__lowerCAmelCase =parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
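# --- Editor's note: typical invocation (the script filename and all paths are
# illustrative placeholders, not fixed values):
# python convert_clap_original_pytorch_to_hf.py \
#     --checkpoint_path ./clap_htsat_tiny.pt --pytorch_dump_folder_path ./clap-hf --enable_fusion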
| 333
| 1
|
'''simple docstring'''
import sys
__snake_case: List[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution( n: str = N ):
"""simple docstring"""
largest_product = -sys.maxsize - 1
for i in range(len(n ) - 12 ):
product = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
largest_product = product
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
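# --- Editor's note: an equivalent one-liner over the same 13-digit windows, shown
# for comparison (assumes Python 3.8+ for math.prod):
# from math import prod
# max(prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))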
| 701
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 460
| 0
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase :List[str] = logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = '''▁'''
lowerCamelCase :str = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
lowerCamelCase :Optional[int] = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
lowerCamelCase :Optional[Any] = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
lowerCamelCase :Union[str, Any] = {
'''ernie-m-base''': 5_1_4,
'''ernie-m-large''': 5_1_4,
}
lowerCamelCase :Tuple = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class _lowerCAmelCase ( __UpperCAmelCase ):
model_input_names = ["input_ids"]
vocab_files_names = VOCAB_FILES_NAMES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
resource_files_names = RESOURCE_FILES_NAMES
def __init__(self , lowercase , lowercase=None , lowercase=False , lowercase="utf8" , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase = None , **lowercase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
A_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , vocab_file=lowercase , encoding=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
A_ : Union[str, Any] = do_lower_case
A_ : List[str] = sentencepiece_model_ckpt
A_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
A_ : List[Any] = self.load_vocab(filepath=lowercase )
else:
A_ : List[str] = {self.sp_model.id_to_piece(lowercase ): id for id in range(self.sp_model.get_piece_size() )}
A_ : Union[str, Any] = {v: k for k, v in self.vocab.items()}
def _a (self , lowercase ):
if text is None:
return None
split_tokens = self.tokenize(lowercase )
normalized_text, char_mapping = """""", []
for i, ch in enumerate(lowercase ):
if ch in self.SP_CHAR_MAPPING:
ch = self.SP_CHAR_MAPPING.get(ch )
else:
ch = unicodedata.normalize("""NFKC""" , ch )
if self.is_whitespace(ch ):
continue
normalized_text += ch
char_mapping.extend([i] * len(ch ) )
text, token_mapping, offset = normalized_text, [], 0
if self.do_lower_case:
text = text.lower()
for token in split_tokens:
if token[:1] == "▁":
token = token[1:]
start = text[offset:].index(token ) + offset
end = start + len(token )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
offset = end
return token_mapping
@property
def _a (self ):
return len(self.vocab )
def _a (self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__(self ):
A_ : int = self.__dict__.copy()
A_ : Optional[int] = None
return state
def __setstate__(self , lowercase ):
A_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ : Union[str, Any] = {}
A_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _a (self , lowercase ):
return "".join((self.SP_CHAR_MAPPING.get(lowercase , lowercase ) for c in text) )
def _a (self , lowercase , lowercase=False , lowercase=64 , lowercase=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
A_ : Dict = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
A_ : List[str] = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
A_ : Optional[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
A_ : Any = self.sp_model.EncodeAsPieces(lowercase )
else:
A_ : Optional[Any] = self.sp_model.SampleEncodeAsPieces(lowercase , lowercase , lowercase )
A_ : Optional[int] = []
for pi, piece in enumerate(lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(lowercase ) and pi != 0:
new_pieces.append(lowercase )
continue
else:
continue
A_ : List[Any] = 0
for i, chunk in enumerate(lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(lowercase ) or self.is_punct(lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(lowercase )
A_ : Optional[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A_ : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A_ : Optional[int] = i
if len(lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _a (self , lowercase ):
A_ : Tuple = """""".join(lowercase ).replace(lowercase , """ """ ).strip()
return out_string
def _a (self , lowercase ):
tokens = self.convert_ids_to_tokens(lowercase )
out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
return out_string
def _a (self , lowercase ):
return self.vocab.get(lowercase , self.vocab.get(self.unk_token ) )
def _a (self , lowercase ):
return self.reverse_vocab.get(lowercase , self.unk_token )
def _a (self , lowercase , lowercase=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : Dict = [self.cls_token_id]
A_ : Any = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _a (self , lowercase , lowercase=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _a (self , lowercase , lowercase=None , lowercase=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1]
def _a (self , lowercase , lowercase = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowercase ) + 1) + [1] * (len(lowercase ) + 3)
def _a (self , lowercase ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _a (self , lowercase ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _a (self , lowercase ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _a (self , lowercase ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowercase ) == 1:
A_ : Union[str, Any] = unicodedata.category(lowercase )
if cat == "Zs":
return True
return False
def _a (self , lowercase ):
A_ : str = {}
with io.open(lowercase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(lowercase ):
A_ : Optional[int] = line.rstrip("""\n""" )
A_ : str = int(lowercase )
return token_to_idx
def _a (self , save_directory , filename_prefix = None ):
index = 0
if os.path.isdir(save_directory ):
vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
index = token_index
writer.write(token + """\n""" )
index += 1
tokenizer_model_file = os.path.join(save_directory , """sentencepiece.bpe.model""" )
with open(tokenizer_model_file , """wb""" ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (vocab_file,)
| 667
|
'''simple docstring'''
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( num_picked = 20 ):
'''simple docstring'''
total = math.comb(NUM_BALLS , num_picked )
missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
result = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
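# --- Editor's note: the closed form above follows from linearity of expectation:
# each of the 7 colours is missing from a 20-ball draw with probability
# C(60, 20) / C(70, 20), so E[#distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)).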
| 667
| 1
|
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))
def compare_versions( library_or_version , operation , requirement_version )-> int:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
operation = STR_OPERATION_TO_FUNC[operation]
if isinstance(library_or_version , str ):
library_or_version = parse(importlib.metadata.version(library_or_version ) )
return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation , version )-> List[str]:
"""simple docstring"""
return compare_versions(torch_version , operation , version )
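# --- Editor's note: quick usage sketch for the helpers above:
# compare_versions("packaging", ">=", "20.0")  # checks an installed package's version
# is_torch_version(">=", "1.12")               # checks the cached torch version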
| 702
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
def get_hash( example )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.md5(re.sub(PATTERN , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats( example )-> str:
"""simple docstring"""
line_lengths = [len(line ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example )-> int:
"""simple docstring"""
alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def check_uniques( example , uniques )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated( example , scan_width=5 )-> Tuple:
"""simple docstring"""
keywords = ["auto-generated", "autogenerated", "automatically generated"]
lines = example["content"].splitlines()
for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test( example , scan_width=5 , coeff=0.05 )-> Optional[Any]:
"""simple docstring"""
keywords = ["unit tests", "test file", "configuration file"]
lines = example["content"].splitlines()
count_config = 0
count_test = 0
# first test
for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
nlines = example["content"].count("\n" )
threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords( example )-> str:
"""simple docstring"""
keywords = ["def ", "class ", "for ", "while "]
lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments( example , minimum=4 )-> Optional[int]:
"""simple docstring"""
lines = example["content"].splitlines()
counter = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio( example )-> List[Any]:
"""simple docstring"""
input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
ratio = len(example["content"] ) / len(input_ids )
return {"ratio": ratio}
def preprocess( example )-> Optional[Any]:
"""simple docstring"""
results = {}
results.update(get_hash(example ) )
results.update(line_stats(example ) )
results.update(alpha_stats(example ) )
results.update(char_token_ratio(example ) )
results.update(is_autogenerated(example ) )
results.update(is_config_or_test(example ) )
results.update(has_no_keywords(example ) )
results.update(has_few_assignments(example ) )
return results
def filter( example , uniques , args )-> Tuple:
"""simple docstring"""
if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file( file_path )-> Dict:
"""simple docstring"""
with open(file_path , "rb" ) as f_in:
with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(f_in , f_out )
os.unlink(file_path )
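# --- Editor's note: the script below wires the helpers above into a
# load -> map(preprocess) -> exact-dedup -> filter -> (optional minhash dedup) -> shard
# pipeline; each heuristic adds a column that `filter` then thresholds against `args`.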
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
t_start = time.time()
ds_filter , duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 656
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __a ( __a ):
'''simple docstring'''
scheduler_classes = (DDPMScheduler,)
def get_scheduler_config ( self , **kwargs ) -> int:
'''simple docstring'''
config = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**kwargs )
return config
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=timesteps )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=t )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = len(_lowerCamelCase )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
__lowercase = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCamelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type="v_prediction" )
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = len(_lowerCamelCase )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
__lowercase = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCamelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase )
__lowercase = scheduler.timesteps
for i, timestep in enumerate(_lowerCamelCase ):
if i == len(_lowerCamelCase ) - 1:
__lowercase = -1
else:
__lowercase = timesteps[i + 1]
__lowercase = scheduler.previous_timestep(_lowerCamelCase )
__lowercase = prev_t.item()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCamelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = [100, 87, 50, 1, 0]
__lowercase = len(_lowerCamelCase )
with self.assertRaises(_lowerCamelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCamelCase )
__lowercase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowerCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
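# --- Illustrative usage of the scheduler under test (a sketch, not part of
# the original test file; the random tensor stands in for a real UNet call) ---
# import torch
# from diffusers import DDPMScheduler
#
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     noise_pred = torch.randn_like(sample)  # stand-in for model(sample, t)
#     sample = scheduler.step(noise_pred, t, sample).prev_sample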
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # transpose the rows of raw data into per-attribute columns
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
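# --- Illustrative usage (a sketch, not part of the original module) ---
# Each inner list is one candidate, each column one attribute, and each weight
# is 0 (lower is better) or 1 (higher is better).
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]  # price, mileage, year
    weights = [0, 0, 1]  # minimise price and mileage, maximise year
    # Appends a combined score to every row of `vehicles` and returns it.
    print(procentual_proximity(vehicles, weights))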
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
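# --- Illustrative usage (a sketch, not part of the original file; assumes
# network access and a depth checkpoint such as "Intel/dpt-large") ---
# from transformers import pipeline
#
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# result["depth"].save("depth.png")        # PIL image with the rescaled depth map
# print(result["predicted_depth"].shape)   # raw model output tensor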
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to unicode strings, avoiding the control and
    whitespace characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of symbol pairs in a word, where the word is represented as a tuple of
    variable-length string symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
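# --- Illustrative usage (a sketch, not part of the original file; assumes the
# "allenai/led-base-16384" checkpoint is reachable) ---
# tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
# enc = tokenizer("Summarize this very long document ...", return_tensors="pt")
# # LED also consumes a `global_attention_mask`; the `_pad` override above keeps
# # it aligned with `input_ids` when padding (padding value -1 = local attention).
# print(enc["input_ids"].shape)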
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
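# --- Illustrative usage (a sketch, not part of the original file) ---
# from transformers import InformerConfig, InformerModel
#
# config = InformerConfig(prediction_length=24, context_length=48)
# model = InformerModel(config)  # randomly initialised weights
# # encoder input width = input_size * len(lags_sequence) + _number_of_features
# print(config.feature_size)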
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model. Output of the last layer of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
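# --- Illustrative usage (a sketch, not part of the original file) ---
# `parameters` packs mean and logvar along the channel axis, so the channel
# count must be even; torch.chunk in __init__ splits it back in two.
# import torch
# params = torch.randn(1, 8, 16, 16)
# posterior = DiagonalGaussianDistribution(params)
# z = posterior.sample()   # reparameterised sample, shape (1, 4, 16, 16)
# kl = posterior.kl()      # KL divergence to a standard normal, per batch item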
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            " We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
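# --- Illustrative usage (a sketch, not part of the original file) ---
# ByT5 tokenizes raw UTF-8 bytes, so it needs no vocabulary file:
# from transformers import ByT5Tokenizer
# tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
# print(tokenizer("hello").input_ids)  # roughly one id per byte, plus </s>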
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards in base 10.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False

    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 77
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
__UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18}
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : List[Any] = image_size
__UpperCAmelCase : str = min_resolution
__UpperCAmelCase : Tuple = max_resolution
__UpperCAmelCase : Optional[Any] = do_resize
__UpperCAmelCase : Any = size
__UpperCAmelCase : Any = do_normalize
__UpperCAmelCase : Any = image_mean
__UpperCAmelCase : Optional[Any] = image_std
def a_ ( self : str):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = ViTImageProcessor if is_vision_available() else None
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self)
@property
def a_ ( self : Union[str, Any]):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase_ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase_ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase_ , "size"))
def a_ ( self : Dict):
"""simple docstring"""
pass
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image)
# Test not batched input
__UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray)
# Test not batched input
__UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor)
# Test not batched input
__UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 77
| 1
|
def UpperCAmelCase ( _lowerCamelCase = 400_0000 ):
A : Dict = [0, 1]
A : str = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A : Optional[int] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 704
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict:
A : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
A : List[Any] = [log for log in logs if "eval_loss" in log.keys()]
A : Any = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A : List[str] = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A : Dict = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
A : List[str] = experiments[experiment_id]
A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
A : Union[str, Any] = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] )
A : Dict = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["n_matches"] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : int = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , )
# Check metrics
A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : Dict = [log for log in logs if "eval_loss" in log.keys()]
A : Dict = eval_metrics[0]
A : int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
# test if do_predict saves generations and metrics
A : Optional[Any] = os.listdir(__lowerCamelCase )
A : Any = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
A : Optional[int] = "--skip_memory_metrics 0"
A : str = self.run_trainer(
max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
A : int = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A : int = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A : Tuple = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]:
A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : int = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
A : Optional[Any] = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCamelCase )}
""".split()
A : Optional[Any] = "\n --do_predict\n ".split()
A : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A : Dict = get_gpu_count()
A : Any = get_torch_dist_unique_port()
A : Optional[Any] = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
A : Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
A : List[Any] = ["run_translation.py"] + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
main()
return output_dir
| 17
| 0
|
from __future__ import annotations
from random import choice
def __A ( __lowerCamelCase ) -> List[Any]:
return choice(__lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> int:
a = random_pivot(__lowerCamelCase )
# partition based on pivot
# linear time
a = [e for e in lst if e < pivot]
a = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__lowerCamelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__lowerCamelCase ) < k - 1:
return kth_number(__lowerCamelCase , k - len(__lowerCamelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 468
|
import operator as op
__UpperCamelCase : Optional[Any] = "scaler.pt"
__UpperCamelCase : Optional[Any] = "pytorch_model"
__UpperCamelCase : str = "random_states"
__UpperCamelCase : Optional[int] = "optimizer"
__UpperCamelCase : Optional[int] = "scheduler"
__UpperCamelCase : str = "pytorch_model.bin"
__UpperCamelCase : List[str] = "pytorch_model.bin.index.json"
__UpperCamelCase : List[str] = "model.safetensors"
__UpperCamelCase : Optional[int] = "model.safetensors.index.json"
__UpperCamelCase : List[str] = "1.10.2"
__UpperCamelCase : Dict = "py38"
__UpperCamelCase : List[str] = "4.17.0"
__UpperCamelCase : Any = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
__UpperCamelCase : Any = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
__UpperCamelCase : int = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
__UpperCamelCase : Dict = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
__UpperCamelCase : str = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
__UpperCamelCase : List[Any] = "2.0.1"
__UpperCamelCase : int = ["pdsh", "standard", "openmpi", "mvapich"]
__UpperCamelCase : List[str] = ["default", "reduce-overhead", "max-autotune"]
__UpperCamelCase : List[Any] = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__UpperCamelCase : List[Any] = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
__UpperCamelCase : List[str] = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
__UpperCamelCase : int = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 468
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : Tuple = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 714
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase( _a ):
lowercase_ : List[Any] = (DPMSolverSinglestepScheduler,)
lowercase_ : List[str] = (("""num_inference_steps""", 25),)
def UpperCamelCase ( self, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf'),
'variance_type': None,
}
config.update(**lowerCamelCase)
return config
def UpperCamelCase ( self, lowerCamelCase=0, **lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Dict = dict(self.forward_default_kwargs)
_lowercase : Union[str, Any] = kwargs.pop('num_inference_steps', lowerCamelCase)
_lowercase : Optional[int] = self.dummy_sample
_lowercase : Optional[int] = 0.1 * sample
_lowercase : Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowercase : Any = self.get_scheduler_config(**lowerCamelCase)
_lowercase : List[Any] = scheduler_class(**lowerCamelCase)
scheduler.set_timesteps(lowerCamelCase)
# copy over dummy past residuals
_lowercase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase)
_lowercase : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase)
new_scheduler.set_timesteps(lowerCamelCase)
# copy over dummy past residuals
_lowercase : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowercase , _lowercase : List[Any] = sample, sample
for t in range(lowerCamelCase, time_step + scheduler.config.solver_order + 1):
_lowercase : Dict = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
_lowercase : int = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCamelCase ( self, lowerCamelCase=0, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[int] = dict(self.forward_default_kwargs)
_lowercase : List[str] = kwargs.pop('num_inference_steps', lowerCamelCase)
_lowercase : List[str] = self.dummy_sample
_lowercase : str = 0.1 * sample
_lowercase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowercase : Any = self.get_scheduler_config()
_lowercase : List[str] = scheduler_class(**lowerCamelCase)
scheduler.set_timesteps(lowerCamelCase)
# copy over dummy past residuals (must be after setting timesteps)
_lowercase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase)
_lowercase : List[Any] = scheduler_class.from_pretrained(lowerCamelCase)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase)
# copy over dummy past residual (must be after setting timesteps)
_lowercase : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowercase : Optional[int] = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
_lowercase : List[Any] = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self, lowerCamelCase=None, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
if scheduler is None:
_lowercase : str = self.scheduler_classes[0]
_lowercase : int = self.get_scheduler_config(**lowerCamelCase)
_lowercase : Optional[Any] = scheduler_class(**lowerCamelCase)
_lowercase : List[Any] = self.scheduler_classes[0]
_lowercase : Optional[int] = self.get_scheduler_config(**lowerCamelCase)
_lowercase : Optional[Any] = scheduler_class(**lowerCamelCase)
_lowercase : List[Any] = 10
_lowercase : List[str] = self.dummy_model()
_lowercase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowercase : Optional[int] = model(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase).prev_sample
return sample
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
_lowercase : Optional[int] = 50
_lowercase : Union[str, Any] = self.dummy_model()
_lowercase : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
_lowercase : Optional[Any] = model(lowerCamelCase, lowerCamelCase)
_lowercase : int = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase).prev_sample
_lowercase : Optional[int] = torch.mean(torch.abs(lowerCamelCase))
assert abs(result_mean.item() - 0.2_5_7_4) < 1E-3
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
_lowercase : List[str] = self.full_loop(scheduler=lowerCamelCase)
_lowercase : str = torch.mean(torch.abs(lowerCamelCase))
assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
_lowercase : str = DEISMultistepScheduler.from_config(scheduler.config)
_lowercase : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config)
_lowercase : Tuple = UniPCMultistepScheduler.from_config(scheduler.config)
_lowercase : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config)
_lowercase : Any = self.full_loop(scheduler=lowerCamelCase)
_lowercase : Optional[int] = torch.mean(torch.abs(lowerCamelCase))
assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCamelCase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase, prediction_type=lowerCamelCase, sample_max_value=lowerCamelCase, algorithm_type='dpmsolver++', solver_order=lowerCamelCase, solver_type=lowerCamelCase, )
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase, solver_type=lowerCamelCase, prediction_type=lowerCamelCase, algorithm_type=lowerCamelCase, )
_lowercase : Optional[Any] = self.full_loop(
solver_order=lowerCamelCase, solver_type=lowerCamelCase, prediction_type=lowerCamelCase, algorithm_type=lowerCamelCase, )
assert not torch.isnan(lowerCamelCase).any(), "Samples have nan numbers"
def UpperCamelCase ( self) -> str:
"""simple docstring"""
self.check_over_configs(lower_order_final=lowerCamelCase)
self.check_over_configs(lower_order_final=lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('inf'))
self.check_over_configs(lambda_min_clipped=-5.1)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.check_over_configs(variance_type=lowerCamelCase)
self.check_over_configs(variance_type='learned_range')
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCamelCase, time_step=0)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = self.full_loop()
_lowercase : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase))
assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = self.full_loop(use_karras_sigmas=lowerCamelCase)
_lowercase : List[str] = torch.mean(torch.abs(lowerCamelCase))
assert abs(result_mean.item() - 0.2_2_4_8) < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Tuple = self.full_loop(prediction_type='v_prediction')
_lowercase : str = torch.mean(torch.abs(lowerCamelCase))
assert abs(result_mean.item() - 0.1_4_5_3) < 1E-3
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=lowerCamelCase)
_lowercase : str = torch.mean(torch.abs(lowerCamelCase))
assert abs(result_mean.item() - 0.0_6_4_9) < 1E-3
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[Any] = self.scheduler_classes[0]
_lowercase : Optional[int] = self.get_scheduler_config(thresholding=lowerCamelCase, dynamic_thresholding_ratio=0)
_lowercase : Any = scheduler_class(**lowerCamelCase)
_lowercase : str = 10
_lowercase : List[str] = self.dummy_model()
_lowercase : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowercase : Tuple = model(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase).prev_sample
assert sample.dtype == torch.floataa
| 354
| 0
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ComputeEnvironment.AMAZON_SAGEMAKER
_snake_case = True
_snake_case = """ml.p3.2xlarge"""
_snake_case = """accelerate_sagemaker_execution_role"""
_snake_case = """hf-sm"""
_snake_case = """us-east-1"""
_snake_case = 1
_snake_case = """accelerate-sagemaker-1"""
_snake_case = """1.6"""
_snake_case = """4.4"""
_snake_case = """train.py"""
_snake_case = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
_snake_case = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
snake_case : List[str] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] , A )
assert isinstance(converted_args["""do_train"""] , A )
assert isinstance(converted_args["""epochs"""] , A )
assert isinstance(converted_args["""learning_rate"""] , A )
assert isinstance(converted_args["""max_steps"""] , A )
with pytest.raises(A ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 587
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : Any = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase : Optional[Any] = {
'roberta-base': 5_1_2,
'roberta-large': 5_1_2,
'roberta-large-mnli': 5_1_2,
'distilroberta-base': 5_1_2,
'roberta-base-openai-detector': 5_1_2,
'roberta-large-openai-detector': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
_snake_case = RobertaTokenizer
def __init__( self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> Optional[int]:
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
snake_case : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A ) != add_prefix_space:
snake_case : Dict = getattr(A , pre_tok_state.pop("""type""" ) )
snake_case : List[str] = add_prefix_space
snake_case : Tuple = pre_tok_class(**A )
snake_case : Tuple = add_prefix_space
snake_case : int = """post_processor"""
snake_case : int = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
snake_case : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case : Dict = tuple(state["""sep"""] )
if "cls" in state:
snake_case : Optional[int] = tuple(state["""cls"""] )
snake_case : List[str] = False
if state.get("""add_prefix_space""" , A ) != add_prefix_space:
snake_case : Tuple = add_prefix_space
snake_case : List[str] = True
if state.get("""trim_offsets""" , A ) != trim_offsets:
snake_case : Any = trim_offsets
snake_case : int = True
if changes_to_apply:
snake_case : str = getattr(A , state.pop("""type""" ) )
snake_case : Any = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
def UpperCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase ( self , A ) -> Any:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
snake_case : int = value
def UpperCAmelCase ( self , *A , **A ) -> BatchEncoding:
snake_case : List[Any] = kwargs.get("""is_split_into_words""" , A )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A , **A )
def UpperCAmelCase ( self , *A , **A ) -> BatchEncoding:
snake_case : str = kwargs.get("""is_split_into_words""" , A )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A , **A )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
snake_case : List[Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
def UpperCAmelCase ( self , A , A=None ) -> Any:
snake_case : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
snake_case : Dict = [self.sep_token_id]
snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 587
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import random
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a = None ):
"""simple docstring"""
lowerCamelCase = value
lowerCamelCase = random()
lowerCamelCase = None
lowerCamelCase = None
def __repr__( self ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
def __str__( self ):
"""simple docstring"""
lowerCamelCase = str(self.value ) + """ """
lowerCamelCase = str(self.left or """""" )
lowerCamelCase = str(self.right or """""" )
return value + left + right
def a__ ( snake_case__ , snake_case__ ) -> List[Any]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowerCamelCase , lowerCamelCase = split(root.left , _lowerCAmelCase )
return left, root
else:
lowerCamelCase , lowerCamelCase = split(root.right , _lowerCAmelCase )
return root, right
def a__ ( snake_case__ , snake_case__ ) -> int:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowerCamelCase = merge(left.right , _lowerCAmelCase )
return left
else:
lowerCamelCase = merge(_lowerCAmelCase , right.left )
return right
def a__ ( snake_case__ , snake_case__ ) -> str:
lowerCamelCase = Node(_lowerCAmelCase )
lowerCamelCase , lowerCamelCase = split(_lowerCAmelCase , _lowerCAmelCase )
return merge(merge(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
def a__ ( snake_case__ , snake_case__ ) -> Dict:
lowerCamelCase , lowerCamelCase = split(_lowerCAmelCase , value - 1 )
lowerCamelCase , lowerCamelCase = split(_lowerCAmelCase , _lowerCAmelCase )
return merge(_lowerCAmelCase , _lowerCAmelCase )
def a__ ( snake_case__ ) -> List[str]:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def a__ ( snake_case__ , snake_case__ ) -> int:
for arg in args.split():
if arg[0] == "+":
lowerCamelCase = insert(_lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
lowerCamelCase = erase(_lowerCAmelCase , int(arg[1:] ) )
else:
print("""Unknown command""" )
return root
def a__ ( ) -> Optional[Any]:
lowerCamelCase = None
print(
"""enter numbers to create a tree, + value to add value into treap, """
"""- value to erase all nodes with value. \'q\' to quit. """ )
lowerCamelCase = input()
while args != "q":
lowerCamelCase = interact_treap(_lowerCAmelCase , _lowerCAmelCase )
print(_lowerCAmelCase )
lowerCamelCase = input()
print("""good by!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 701
|
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase : str = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase : Dict = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = []
lowerCamelCase = len(snake_case__ )
for i in range(snake_case__ ):
lowerCamelCase = -1
for j in range(i + 1 , snake_case__ ):
if arr[i] < arr[j]:
lowerCamelCase = arr[j]
break
result.append(snake_case__ )
return result
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = []
for i, outer in enumerate(snake_case__ ):
lowerCamelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowerCamelCase = inner
break
result.append(snake_case__ )
return result
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = len(snake_case__ )
lowerCamelCase = []
lowerCamelCase = [-1] * arr_size
for index in reversed(range(snake_case__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowerCamelCase = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase : Dict = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 533
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=False ) -> Tuple:
a_ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a_ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=False ) -> int:
for i in range(config.num_hidden_layers ):
if base_model:
a_ : int = ''''''
else:
a_ : Optional[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a_ : List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
a_ : int = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a_ : str = in_proj_weight[
: config.hidden_size, :
]
a_ : List[Any] = in_proj_bias[: config.hidden_size]
a_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a_ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
a_ : Dict = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> str:
a_ : Optional[Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
a_ : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE__ )
a_ : Any = val
def lowerCAmelCase_ ( ) -> Tuple:
a_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE__, stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> List[Any]:
a_ : Any = ViTConfig()
a_ : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
a_ : Tuple = True
a_ : Any = int(vit_name[-12:-10] )
a_ : int = int(vit_name[-9:-6] )
else:
a_ : Optional[int] = 1_000
a_ : Any = '''huggingface/label-files'''
a_ : List[Any] = '''imagenet-1k-id2label.json'''
a_ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, repo_type="dataset" ), "r" ) )
a_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
a_ : str = idalabel
a_ : int = {v: k for k, v in idalabel.items()}
a_ : List[str] = int(vit_name[-6:-4] )
a_ : int = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
a_ : Union[str, Any] = 192
a_ : Optional[int] = 768
a_ : Optional[Any] = 12
a_ : str = 3
elif vit_name[9:].startswith("small" ):
a_ : str = 384
a_ : int = 1_536
a_ : List[str] = 12
a_ : int = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
a_ : int = 768
a_ : Any = 2_304
a_ : Optional[int] = 8
a_ : Union[str, Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
a_ : Optional[int] = 1_024
a_ : str = 4_096
a_ : Dict = 24
a_ : int = 16
elif vit_name[4:].startswith("huge" ):
a_ : str = 1_280
a_ : int = 5_120
a_ : Optional[int] = 32
a_ : Dict = 16
# load original model from timm
a_ : Optional[int] = timm.create_model(SCREAMING_SNAKE_CASE__, pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
a_ : List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = create_rename_keys(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
a_ : Optional[int] = ViTModel(SCREAMING_SNAKE_CASE__ ).eval()
else:
a_ : Union[str, Any] = ViTForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
a_ : int = DeiTImageProcessor(size=config.image_size )
else:
a_ : str = ViTImageProcessor(size=config.image_size )
a_ : str = image_processor(images=prepare_img(), return_tensors="pt" )
a_ : int = encoding['''pixel_values''']
a_ : Dict = model(SCREAMING_SNAKE_CASE__ )
if base_model:
a_ : List[str] = timm_model.forward_features(SCREAMING_SNAKE_CASE__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(SCREAMING_SNAKE_CASE__, outputs.pooler_output, atol=1e-3 )
else:
a_ : Tuple = timm_model(SCREAMING_SNAKE_CASE__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE__, outputs.logits, atol=1e-3 )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 237
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _A ( _lowerCamelCase ):
_UpperCamelCase : torch.FloatTensor
class _A ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self : str , _A : int = 65_536 , _A : Optional[int] = None , _A : int = 2 , _A : int = 2 , _A : int = 0 , _A : str = "fourier" , _A : bool = True , _A : bool = False , _A : float = 0.0 , _A : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _A : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _A : Tuple[str] = "UNetMidBlock1D" , _A : str = None , _A : Tuple[int] = (32, 32, 64) , _A : str = None , _A : int = 8 , _A : int = 1 , _A : bool = False , ) -> Any:
"""simple docstring"""
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
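# Minimal smoke-test sketch for the model above (not part of the original file).
# It assumes the default block configuration accepts an input length divisible by
# the downsampling factor; exact shape constraints depend on the 1D block
# implementations.
#
# model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
# noisy = torch.randn(1, 2, 256)                 # (batch, channels, length)
# out = model(noisy, torch.tensor([10])).sample  # same (batch, channels, length) layout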
| 217
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
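# To exercise just this suite locally (illustrative path; adjust to where the
# file lives in your checkout):
#   python -m pytest -q tests/models/deberta_v2/test_modeling_tf_deberta_v2.py -k "test_model"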
| 712
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Return True if the windowed key/value pairs match
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
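# Illustrative sketch (the parameter tree below is hypothetical): each flattened
# parameter name is matched against the rules above and mapped to a
# PartitionSpec, e.g. an embedding matrix gets sharded along the "mp" axis.
#
# import jax.numpy as jnp
# params = freeze({"transformer": {"wte": {"embedding": jnp.zeros((100, 16))}}})
# specs = set_partitions(params)
# flatten_dict(specs)[("transformer", "wte", "embedding")]  # -> PartitionSpec("mp", None)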
| 276
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
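# The CPU variants above can be run without accelerators (illustrative command;
# the test module path depends on your checkout layout):
#   python -m pytest -q tests/test_metrics.py -k "cpu"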
| 92
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=36_000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35_998, pad_token_id=35_995, eos_token_id=35_999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs,
        )
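# Usage sketch (illustrative values): the attribute_map above lets generic code
# read `hidden_size` while the config stores it as `d_model`.
#
# config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=2, num_ext_layers=0)
# assert config.hidden_size == config.d_model == 512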
| 523
| 0
|
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the cluster with the smaller distance wins
        return 0 if d0 < d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Move the winning vector j toward the sample by learning rate alpha."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
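# Worked micro-example of the update rule (illustrative numbers): with
# alpha = 0.5, a stored weight of 0.2 and a sample component of 1.0, the new
# weight is 0.2 + 0.5 * (1.0 - 0.2) = 0.6, i.e. the winner moves halfway
# toward the sample.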
| 472
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the source data: gather the i-th element of every row into column i."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
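# Usage sketch (illustrative data): score three options on three criteria,
# where weight 0 means "lower is better" and weight 1 means "higher is better".
#
# vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
# procentual_proximity(vehicles, [0, 0, 1])
# # each row now ends with its combined min-max-normalised score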
| 472
| 1
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(self, feature_size: int = 1, sampling_rate: int = 16_000, padding_value: float = 0.0, do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80, fmax: float = 76_00, mel_floor: float = 1e-10, reduction_factor: int = 2, return_attention_mask: bool = True, **kwargs):
"""simple docstring"""
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filterbank features for one unbatched waveform."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(self, audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            input_values = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            input_values = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            input_values = input_values.astype(np.float32)
        padded_inputs["input_values"] = input_values

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
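# Usage sketch (dummy audio, model-agnostic): `audio=` produces padded raw
# waveforms, `audio_target=` produces log-mel spectrogram targets.
#
# extractor = SpeechT5FeatureExtractor()
# speech = np.zeros(16000, dtype=np.float32)  # 1 second at 16 kHz
# inputs = extractor(audio=speech, sampling_rate=16000, return_tensors="np")
# targets = extractor(audio_target=speech, sampling_rate=16000)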
| 282
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 1_00, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps,
        )
    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
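# Sampling-loop sketch (illustrative; `denoiser`, `sample` and `key` are assumed
# to be provided by the caller, and the step pairing of sigmas is simplified):
#
# scheduler = FlaxKarrasVeScheduler()
# state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps=50)
# for i in range(len(state.schedule)):
#     sigma = state.schedule[i]
#     sigma_prev = state.schedule[i + 1] if i < len(state.schedule) - 1 else 0.0
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#     model_output = denoiser(sample_hat, sigma_hat)
#     sample, _, state = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False)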
| 282
| 1
|
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int) -> None:
    """DFS over the state-space tree; prune once the path sum exceeds max_sum or can no longer reach it."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
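# For the inputs above, the printed solutions are [3, 4, 2] and [4, 5]
# (the only subsets of nums summing to 9).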
| 702
|
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implement tanh(x) = 2 / (1 + e^(-2x)) - 1 element-wise."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
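# Example (illustrative): tangent_hyperbolic(np.array([0.0, 1.0])) returns
# approximately array([0.        , 0.76159416]), matching np.tanh.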
| 551
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()

    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
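# Example launch (illustrative; assumes this file is saved as run_on_remote.py):
#   python run_on_remote.py --instance V100:1 --provider cheapest \
#       --example pytorch/text-generation/run_generation.py \
#       --model_type gpt2 --model_name_or_path gpt2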
| 10
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class containing a conversation and its history."""

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
    min_length_for_response (`int`, *optional*, defaults to 32):
        The minimum length (in number of tokens) for a response.
    minimum_tokens (`int`, *optional*, defaults to 10):
        The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
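# Usage sketch (the model name is illustrative):
#
# from transformers import pipeline
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = Conversation("What's the best way to learn Python?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])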
| 607
| 0
|
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
                'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
            ],
        )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
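# Note on the transforms above (illustrative): "ab c" becomes the character
# sequence ['a', 'b', ' ', 'c'], so spaces between words count as symbols when
# the edit distance is computed.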
| 350
|
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
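# Illustrative usage sketch for the metric above (the metric name under
# `datasets.load_metric` is assumed to be "wiki_split"; outputs are a dict
# with "sari", "sacrebleu" and "exact" keys):
#
#   import datasets
#   metric = datasets.load_metric("wiki_split")
#   sources = ["About 95 species are currently accepted ."]
#   predictions = ["About 95 species are currently known ."]
#   references = [["About 95 species are currently known ."]]
#   results = metric.compute(sources=sources, predictions=predictions, references=references)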
| 350
| 1
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0X4e00 and cp <= 0X9fff)
            or (cp >= 0X3400 and cp <= 0X4dbf)  #
            or (cp >= 0X2_0000 and cp <= 0X2_a6df)  #
            or (cp >= 0X2_a700 and cp <= 0X2_b73f)  #
            or (cp >= 0X2_b740 and cp <= 0X2_b81f)  #
            or (cp >= 0X2_b820 and cp <= 0X2_ceaf)  #
            or (cp >= 0Xf900 and cp <= 0Xfaff)
            or (cp >= 0X2_f800 and cp <= 0X2_fa1f)  #
        ):  #
            return True

        return False
class snake_case_ ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , **__lowerCAmelCase ):
super().__init__(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = Queue()
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = timeout
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
self.text_queue.put(__lowerCAmelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
return self
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
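# Illustrative usage sketch for TextIteratorStreamer (the "gpt2" checkpoint
# name is only a placeholder; any causal LM works): generation runs in a
# background thread while the main thread consumes decoded text chunks.
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tok)
#   Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
#   generated_text = "".join(chunk for chunk in streamer)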
| 345
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30_522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
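# Illustrative usage sketch: building a config with one overridden field.
#
#   config = VisualBertConfig(visual_embedding_dim=1024)
#   assert config.model_type == "visual_bert" and config.visual_embedding_dim == 1024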
| 345
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
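# Illustrative note: with the lazy module installed above, the heavy imports
# only run on first attribute access, e.g.:
#
#   from transformers import LlamaConfig  # resolved through _LazyModule
#   config = LlamaConfig(hidden_size=512)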
| 429
|
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
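# A minimal forward-pass sketch for one of the blocks exercised above (a rough
# sketch only; constructor and forward signatures may differ across diffusers
# versions, and the shapes mirror the dummy configs used in these tests):
#
#   import torch
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
#   sample = torch.randn(4, 32, 32, 32)
#   temb = torch.randn(4, 128)
#   hidden_states, output_states = block(sample, temb)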
| 429
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 675
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Tests the spacy tokenizer found in `openai_gpt.py`."""

    pass
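# Worked example of the toy BPE above: "lower" is first split into characters
# with an end-of-word marker ("l o w e r</w>"), then the merges "l o" -> "lo",
# "lo w" -> "low" and "e r</w>" -> "er</w>" apply, yielding ["low", "er</w>"],
# i.e. ids [14, 15] in the vocab defined in setUp; "<unk>" maps to id 20.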
| 675
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
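# Rebalancing cheat sheet: the four classic imbalance cases map onto the
# helpers above, which is exactly how insert_node below picks a rotation --
#   left-left   -> right_rotation(node)
#   left-right  -> lr_rotation(node)
#   right-right -> left_rotation(node)
#   right-left  -> rl_rotation(node)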
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root
class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
| 21
|
"""simple docstring"""
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
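# For the adjacency list above the program prints 5: the longest path, counted
# in vertices, is 0 -> 2 -> 5 -> 6 -> 7 (long_dist is initialized to 1 per node).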
| 21
| 1
|
def merge_sort(collection):
    # Despite the historical name, this is a min/max selection scheme, not a
    # classic merge sort: each pass moves the smallest remaining element to the
    # front block and the largest to the back block.
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 481
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(dict_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
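# Example invocation (the script filename and all paths are placeholders):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --config_path /path/to/config.json \
#       --dict_path /path/to/dict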
| 481
| 1
|
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """
    Finds where `function` becomes 0 in [a, b] using the bisection (Bolzano) method.
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is smaller than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
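# Worked example: for f(x) = x**3 - 2*x - 5 the call above converges to the
# single real root, approximately 2.0945515, well within the 1e-7 tolerance.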
| 718
|
"""simple docstring"""
def decimal_to_fraction(decimal):
    """
    Return a decimal number in its simplest fraction form.
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction with the Euclidean gcd of numerator and denominator
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 133
| 0
|
from math import sqrt
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"

    return status
def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans
def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans
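# Worked examples: prime_factorization(0) -> [0], prime_factorization(12) -> [2, 2, 3],
# prime_factorization(97) -> [97] (97 is prime, so it is its own factorization).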
def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans
def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans
def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"

    return number % 2 == 0
def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"

    return number % 2 != 0
def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and the sum of its elements must equal 'number'"

    return ans
def gcd ( numbera , numberb ):
    """simple docstring"""
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    # Euclidean algorithm: repeatedly replace (a, b) with (b, a mod b)
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera , int ) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
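# Editor-added: the same Euclidean reduction as above, written recursively
# (illustrative names; gcd(a, 0) == a, otherwise gcd(a, b) == gcd(b, a % b)):
def gcd_recursive_sketch(a: int, b: int) -> int:
    return a if b == 0 else gcd_recursive_sketch(b, a % b)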
def kg_v ( numbera , numberb ):
    """simple docstring"""
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera )
        prime_fac_b = prime_factorization(numberb )
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera , numberb )
    count_a = 0
    count_b = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n )
                count_b = prime_fac_b.count(n )
                for _ in range(max(count_a , count_b ) ):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n )
                for _ in range(count_a ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n )
            for _ in range(count_b ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
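# Editor-added cross-check: the factorization-based kgV above should agree with
# the standard identity lcm(a, b) == a * b // gcd(a, b) (a sketch, assuming
# a, b >= 1):
import math

def lcm_sketch(a: int, b: int) -> int:
    return a * b // math.gcd(a, b)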
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase ) and (n >= 0), "'number' must been a positive int"
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowercase ):
ans += 1
# precondition
assert isinstance(lowercase , lowercase ) and is_prime(
lowercase ), "'ans' must been a prime number and from type int"
return ans
def get_primes_between ( p_number_a , p_number_b ):
    """simple docstring"""
    assert (
        is_prime(p_number_a ) and is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_b:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_a
        and ans[len(ans ) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase ) and (n >= 1), "'n' must been int and >= 1"
SCREAMING_SNAKE_CASE : Any = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowercase )
# precondition
    assert ans[0] == 1 and ans[len(lowercase ) - 1] == n, "Error in function getDivisors(...)"
return ans
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
    assert isinstance(lowercase , lowercase ) and (
        number > 1
    ), "'number' must been an int and > 1"
SCREAMING_SNAKE_CASE : Optional[int] = get_divisors(lowercase )
# precondition
assert (
isinstance(lowercase , lowercase )
and (divisors[0] == 1)
and (divisors[len(lowercase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert (
isinstance(lowercase , lowercase )
and isinstance(lowercase , lowercase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
SCREAMING_SNAKE_CASE : List[str] = gcd(abs(lowercase ) , abs(lowercase ) )
# precondition
assert (
isinstance(lowercase , lowercase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been a int and >= 0"
    SCREAMING_SNAKE_CASE : Union[str, Any] = 1  # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been an int and >= 0"
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : str = 1
    SCREAMING_SNAKE_CASE : int = 1  # this will be returned
for _ in range(n - 1 ):
SCREAMING_SNAKE_CASE : str = ans
ans += fiba
SCREAMING_SNAKE_CASE : Tuple = tmp
return ans
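# Editor-added cross-check for the loop above (a sketch; under this file's
# convention fib(0) == fib(1) == 1):
def fib_sketch(n: int) -> int:
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return current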
| 62
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
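# Editor's note: `_LazyModule` defers the heavy torch imports until an
# attribute is first accessed. A bare-bones sketch of the same idea using the
# module-level `__getattr__` hook from PEP 562 (illustrative only; the mapping
# below is a hypothetical example and the function is renamed so it stays
# inert here):
import importlib

_lazy_attr_to_module = {"JukeboxTokenizer": ".tokenization_jukebox"}

def _lazy_getattr_sketch(name):  # a real module would name this __getattr__
    if name in _lazy_attr_to_module:
        module = importlib.import_module(_lazy_attr_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")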
| 62
| 1
|
def match_pattern ( input_string : str , pattern : str ):
    """simple docstring"""
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_lowerCAmelCase : Union[str, Any] = "aab"
_lowerCAmelCase : Union[str, Any] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
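    # Editor-added spot checks (hypothetical inputs, standard "." / "*" semantics):
    assert match_pattern("aab", "c*a*b")  # "c*" matches empty, "a*" matches "aa"
    assert match_pattern("aaa", "a.a")  # "." matches any single character
    assert not match_pattern("aaa", "aa")  # the pattern must cover the whole string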
| 364
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = (IPNDMScheduler,)
UpperCAmelCase_ = (("""num_inference_steps""", 50),)
def UpperCAmelCase_ ( self :List[str] , **lowerCamelCase :List[Any] ) -> List[str]:
UpperCAmelCase__ = {"num_train_timesteps": 1000}
config.update(**lowerCamelCase )
return config
def UpperCAmelCase_ ( self :str , lowerCamelCase :Union[str, Any]=0 , **lowerCamelCase :str ) -> str:
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("num_inference_steps" , lowerCamelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase )
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self :Tuple ) -> Tuple:
pass
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :List[str]=0 , **lowerCamelCase :List[str] ) -> Optional[int]:
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("num_inference_steps" , lowerCamelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self :int , **lowerCamelCase :Any ) -> int:
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
return sample
def UpperCAmelCase_ ( self :Dict ) -> Optional[Any]:
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("num_inference_steps" , lowerCamelCase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase , "set_timesteps" ):
scheduler.set_timesteps(lowerCamelCase )
elif num_inference_steps is not None and not hasattr(lowerCamelCase , "set_timesteps" ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.timesteps[5]
UpperCAmelCase__ = scheduler.timesteps[6]
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self :List[str] ) -> Tuple:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase , time_step=lowerCamelCase )
def UpperCAmelCase_ ( self :Tuple ) -> Optional[int]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase , time_step=lowerCamelCase )
def UpperCAmelCase_ ( self :Any ) -> Dict:
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 364
| 1
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = "x" ,lowerCAmelCase__ = 10**-10 ,lowerCAmelCase__ = 1 ,):
lowerCamelCase_ = symbols(lowerCAmelCase__ )
lowerCamelCase_ = lambdify(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = lambdify(lowerCAmelCase__ ,diff(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = starting_point
while True:
if diff_function(lowerCAmelCase__ ) != 0:
lowerCamelCase_ = prev_guess - multiplicity * func(lowerCAmelCase__ ) / diff_function(
lowerCAmelCase__ )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowerCamelCase_ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 29
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A__ : Optional[int] = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def a ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = True
while ask_again:
lowercase__ = input(lowerCamelCase_ )
try:
if default is not None and len(lowerCamelCase_ ) == 0:
return default
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_=[] , lowerCamelCase_=None , lowerCamelCase_=0 ):
'''simple docstring'''
lowercase__ = BulletMenu(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = menu.run(default_choice=lowerCamelCase_ )
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _UpperCAmelCase ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def lowercase__ ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = super()._format_usage(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = usage.replace('''<command> [<args>] ''', '''''' )
return usage
| 671
|
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671
| 1
|
"""simple docstring"""
from timeit import timeit
A : Dict = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
UpperCamelCase__ = 0
UpperCamelCase__ = len(_snake_case ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
UpperCamelCase__ = len(_snake_case ) // 2
UpperCamelCase__ = len(_snake_case )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(_snake_case ) )
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
if len(_snake_case ) <= 2:
return True
if s[0] == s[len(_snake_case ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
return s == s[::-1]
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
UpperCamelCase__ = F'all({name}(key) is value for key, value in test_data.items())'
UpperCamelCase__ = F'from __main__ import test_data, {name}'
UpperCamelCase__ = 50_00_00
UpperCamelCase__ = timeit(stmt=_snake_case , setup=_snake_case , number=_snake_case )
print(F'{name:<35} finished {number:,} runs in {result:.5f} seconds' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 516
|
"""simple docstring"""
from __future__ import annotations
import time
A : List[str] = list[tuple[int, int]]
A : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Node | None ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = pos_x
UpperCamelCase__ = pos_y
UpperCamelCase__ = (pos_y, pos_x)
UpperCamelCase__ = goal_x
UpperCamelCase__ = goal_y
UpperCamelCase__ = parent
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :tuple[int, int] , lowerCamelCase_ :tuple[int, int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCamelCase_ )
UpperCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCamelCase_ )
UpperCamelCase__ = [self.start]
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Any ) -> Path | None:
"""simple docstring"""
while self.node_queue:
UpperCamelCase__ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCamelCase__ = True
return self.retrace_path(lowerCamelCase_ )
UpperCamelCase__ = self.get_successors(lowerCamelCase_ )
for node in successors:
self.node_queue.append(lowerCamelCase_ )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase__ ( self :str , lowerCamelCase_ :Node ) -> list[Node]:
"""simple docstring"""
UpperCamelCase__ = []
for action in delta:
UpperCamelCase__ = parent.pos_x + action[1]
UpperCamelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , lowerCamelCase_ ) )
return successors
def lowerCamelCase__ ( self :Any , lowerCamelCase_ :Node | None ) -> Path:
"""simple docstring"""
UpperCamelCase__ = node
UpperCamelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCamelCase__ = current_node.parent
path.reverse()
return path
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = False
def lowerCamelCase__ ( self :int ) -> Path | None:
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCamelCase__ = self.fwd_bfs.node_queue.pop(0 )
UpperCamelCase__ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCamelCase__ = True
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = current_bwd_node
UpperCamelCase__ = current_fwd_node
UpperCamelCase__ = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCamelCase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Node , lowerCamelCase_ :Node ) -> Path:
"""simple docstring"""
UpperCamelCase__ = self.fwd_bfs.retrace_path(lowerCamelCase_ )
UpperCamelCase__ = self.bwd_bfs.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCamelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A : str = (0, 0)
A : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Any = time.time()
A : Optional[int] = BreadthFirstSearch(init, goal)
A : List[str] = bfs.search()
A : Dict = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
A : Optional[int] = time.time()
A : Any = BidirectionalBreadthFirstSearch(init, goal)
A : List[Any] = bd_bfs.search()
A : Dict = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
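# Editor's note: the `pop(0)` calls in the queues above are O(n) on Python
# lists; a compact sketch of the same grid BFS with an O(1)
# `collections.deque` (names and the path-in-queue bookkeeping are
# illustrative, not from the original):
from collections import deque


def bfs_sketch(grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]) -> list[tuple[int, int]] | None:
    queue = deque([(start, [start])])
    seen = {start}
    while queue:
        (y, x), path = queue.popleft()
        if (y, x) == goal:
            return path
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and not grid[ny][nx] and (ny, nx) not in seen:
                seen.add((ny, nx))
                queue.append(((ny, nx), path + [(ny, nx)]))
    return None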
| 516
| 1
|
'''simple docstring'''
import unittest
import numpy as np
def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : List[str] , __A : List[Any] , __A : Tuple = None , ) -> Optional[int]:
"""simple docstring"""
a_ : Any = np.shape(__SCREAMING_SNAKE_CASE )
a_ : int = np.shape(__SCREAMING_SNAKE_CASE )
a_ : str = np.shape(__SCREAMING_SNAKE_CASE )
if shape_a[0] != shape_b[0]:
a_ : Union[str, Any] = (
"Expected the same number of rows for A and B. "
F"""Instead found A of size {shape_a} and B of size {shape_b}"""
)
raise ValueError(__SCREAMING_SNAKE_CASE )
if shape_b[1] != shape_c[1]:
a_ : str = (
"Expected the same number of columns for B and C. "
F"""Instead found B of size {shape_b} and C of size {shape_c}"""
)
raise ValueError(__SCREAMING_SNAKE_CASE )
a_ : Any = pseudo_inv
if a_inv is None:
try:
a_ : Optional[Any] = np.linalg.inv(__SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
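# Editor-added sketch of the identity the tests below rely on,
# det([[A, B], [B^T, C]]) == det(A) * det(S), with S the Schur complement of A
# (the matrix values are illustrative):
def _schur_identity_demo() -> None:
    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([[1.0], [2.0]])
    c = np.array([[6.0]])
    s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of A
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))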
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
a_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
a_ : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
a_ : List[Any] = np.array([[2, 1], [6, 3]] )
a_ : str = schur_complement(lowercase_ , lowercase_ , lowercase_ )
a_ : int = np.block([[a, b], [b.T, c]] )
a_ : int = np.linalg.det(lowercase_ )
a_ : int = np.linalg.det(lowercase_ )
a_ : str = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
a_ : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
a_ : List[Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
a_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
a_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
a_ : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 704
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
snake_case__ : List[str] = MvpTokenizer
snake_case__ : Dict = MvpTokenizerFast
snake_case__ : Any = True
snake_case__ : Optional[int] = filter_roberta_detectors
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
super().setUp()
a_ : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
a_ : int = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
a_ : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a_ : str = {'unk_token': '<unk>'}
a_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
a_ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a_ : List[Any] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , max_length=len(SCREAMING_SNAKE_CASE__ ) , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
a_ : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test that special tokens are reset
@require_torch
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a_ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('labels' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a_ : List[str] = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , max_length=3_2 , padding='max_length' , return_tensors='pt' )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a_ : Dict = tokenizer(
['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
a_ : Dict = ['A long paragraph for summarization.']
a_ : Dict = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a_ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
a_ : Dict = inputs['input_ids']
a_ : str = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a_ : List[str] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : int = 'A, <mask> AllenNLP sentence.'
a_ : Optional[Any] = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : int = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
a_ : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 443
| 0
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 539
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
_lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def _snake_case ( ):
A = os.path.dirname(os.path.realpath(snake_case__ ) )
A = os.path.join(snake_case__ , 'words.txt' )
A = ''
with open(snake_case__ ) as f:
A = f.readline()
A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
A = [
word
for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(snake_case__ )
if __name__ == "__main__":
print(solution())
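# Editor-added: a word value t is triangular exactly when 8*t + 1 is a perfect
# square, since t = k*(k+1)/2 implies 8*t + 1 = (2*k + 1)**2; a small sketch
# that avoids the fixed 100-entry table above:
from math import isqrt


def is_triangular_sketch(t: int) -> bool:
    discriminant = 8 * t + 1
    root = isqrt(discriminant)
    return root * root == discriminant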
| 91
| 0
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force ( magnitude : float , angle : float , radian_mode : bool = False ):
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium ( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 ):
    '''simple docstring'''
    moments = cross(location , forces )
    sum_moments = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 1_80 - 30),
            polar_force(879.54, 45),
            polar_force(1_00, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(2_15, 1_80 - 45),
            polar_force(2_64, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
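    # Editor-added sanity check: two equal, opposite, collinear forces applied
    # at the origin produce zero net moment (illustrative values only).
    balanced = array([polar_force(10.0, 0), polar_force(10.0, 180)])
    points = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(balanced, points)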
| 24
|
"""simple docstring"""
import os
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' )
with open(A ) as file_hand:
return str(sum(int(A ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24
| 1
|
'''simple docstring'''
from __future__ import annotations
def _a ( maze ) -> bool:
    """simple docstring"""
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("""\n""".join(str(row ) for row in solutions ) )
    else:
        print("""No solution exists!""" )
    return solved
def run_maze ( maze , i , j , solutions ) -> bool:
    """simple docstring"""
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
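    # Editor-added illustrative run of the solver `_a` defined above
    # (the 4x4 grid is a hypothetical example; 0 = open cell, 1 = blocked):
    _a(
        [
            [0, 1, 0, 0],
            [0, 0, 0, 1],
            [1, 0, 1, 0],
            [0, 0, 0, 0],
        ]
    )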
| 26
|
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
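# Editor-added: a self-contained restatement of the greedy selection above with
# explicit names (illustrative only; it assumes items expose get_weight() and
# get_value(), as the call sites above do):
def greedy_sketch(items, max_cost, key_function):
    taken, total_value, total_cost = [], 0.0, 0.0
    # consider items in decreasing order of the chosen key (value, weight, ...)
    for item in sorted(items, key=key_function, reverse=True):
        if total_cost + item.get_weight() <= max_cost:
            taken.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return taken, total_value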
| 17
| 0
|
from collections import deque
from .hash_table import HashTable
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : Optional[int] , *lowerCAmelCase : str , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
super().__init__(*__a , **__a)
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
_snake_case : Optional[int] = deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__a)
_snake_case : Tuple = self.values[key]
def UpperCamelCase_ ( self : Any) -> Tuple:
"""simple docstring"""
return (
sum(self.charge_factor - len(__a) for slot in self.values)
/ self.size_table
* self.charge_factor
)
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str=None) -> List[str]:
"""simple docstring"""
if not (
len(self.values[key]) == self.charge_factor and self.values.count(__a) == 0
):
return key
return super()._collision_resolution(__a , __a)
| 712
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a__ = logging.getLogger(__name__)
def lowercase ( SCREAMING_SNAKE_CASE__ : torch.nn.Module , SCREAMING_SNAKE_CASE__ : BnbQuantizationConfig , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE__ : bool = False , ) -> int:
_snake_case : int = bnb_quantization_config.load_in_abit
_snake_case : Tuple = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
_snake_case : List[Any] = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(device_map.keys() ) > 1:
_snake_case : Tuple = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_snake_case : Union[str, Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_snake_case : Optional[Any] = []
_snake_case : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE__ )
# compatibility with peft
_snake_case : Union[str, Any] = load_in_abit
_snake_case : Any = load_in_abit
_snake_case : Optional[int] = get_parameter_device(SCREAMING_SNAKE_CASE__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
_snake_case : int = replace_with_bnb_layers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ )
# convert param to the right dtype
_snake_case : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_snake_case : Union[str, Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
_snake_case : Any = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE__ ):
param.to(SCREAMING_SNAKE_CASE__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
_snake_case : Optional[int] = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_memory=SCREAMING_SNAKE_CASE__ , no_split_module_classes=SCREAMING_SNAKE_CASE__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_snake_case : Union[str, Any] = True
_snake_case : Any = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE__ , offload_state_dict=SCREAMING_SNAKE_CASE__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , offload_dir=SCREAMING_SNAKE_CASE__ )
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Any=None ) -> List[Any]:
if device_map is None:
if torch.cuda.is_available():
_snake_case : Dict = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
_snake_case : int = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_snake_case : Tuple = {}
_snake_case : List[str] = special_dtypes
_snake_case : int = no_split_module_classes
_snake_case : List[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_snake_case : Optional[int] = get_balanced_memory(
SCREAMING_SNAKE_CASE__ , low_zero=(device_map == """balanced_low_0""") , max_memory=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
_snake_case : str = max_memory
_snake_case : Optional[int] = infer_auto_device_map(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# check if don't have any quantized module on the cpu
_snake_case : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_snake_case : Dict = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
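# Example (added sketch, not in the original module): a hand-written `device_map`
# that pins a hypothetical head module to the CPU while the rest goes to GPU 0.
# Keys come from `model.named_modules()`; the names below are illustrative only.
#
# >>> custom_map = {"transformer": 0, "lm_head": "cpu"}
# >>> device_map = get_quantized_model_device_map(model, bnb_quantization_config, custom_map)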
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
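# Example (added sketch; assumes `bitsandbytes` is installed and that
# `BnbQuantizationConfig` is importable from `accelerate.utils`):
#
# >>> with init_empty_weights():
# ...     toy = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 4))
# >>> cfg = BnbQuantizationConfig(load_in_8bit=True)
# >>> toy = replace_with_bnb_layers(toy, cfg)
# >>> isinstance(toy[0], bnb.nn.Linear8bitLt)
# True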
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
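# Example (added, illustrative): for a causal LM whose output head is tied to the
# input embedding, the returned modules are then kept out of quantization, e.g.
#
# >>> get_keys_to_not_convert(causal_lm)   # `causal_lm` is a hypothetical model
# ['lm_head']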
def has_4bit_bnb_layers(model):
    """Check if we have `bnb.nn.Linear4bit` layers inside our model."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
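# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal end-to-end example of driving the loader above. The tiny model and the
# checkpoint path are assumptions for demonstration; any `nn.Module` checkpoint works.
if __name__ == "__main__":
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig

    with init_empty_weights():
        # hypothetical two-layer model standing in for a real pretrained network
        empty_model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 10))

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    quantized_model = load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="path/to/checkpoint_folder",  # assumed folder containing the weights
        device_map="auto",
    )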
| 198
| 0
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Replace any `GenerationConfig` entry by its own dict so the result stays
        # JSON-serializable.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
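# --- Usage sketch (illustrative addition) ---
# Minimal example of constructing these arguments; `output_dir` is required by the
# base `TrainingArguments`, everything else shown below is optional.
if __name__ == "__main__":
    args = Seq2SeqTrainingArguments(
        output_dir="out",
        predict_with_generate=True,
        generation_max_length=128,
        generation_num_beams=4,
    )
    print(args.to_dict()["generation_max_length"])  # 128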
| 67
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # With a tiny random model the scores tie at ~0.333, so the label order is
            # not deterministic; only the structure and scores are checked.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # Same caveat as the PyTorch version above: tied scores make the label
            # order non-deterministic.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 354
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
_A = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
_A = {F"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" Funnel Transformer tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
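# --- Usage sketch (illustrative addition) ---
# Round-trip a sentence pair through the fast tokenizer. Requires network access to
# download the "funnel-transformer/small" checkpoint listed above.
if __name__ == "__main__":
    tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    encoded = tokenizer("Hello world", "How are you?")
    print(encoded["input_ids"])
    # Funnel is unusual in that the [CLS] position gets token type id 2, see
    # `cls_token_type_id` above.
    print(encoded["token_type_ids"])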
| 706
|
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num`."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
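# Worked example (added): for num=10 the sieve marks the multiples of 2 and 3 and
# collects the survivors. Runs in O(n log log n) time and O(n) memory.
#
# >>> prime_sieve(10)
# [2, 3, 5, 7]
# >>> prime_sieve(1)
# []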
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 133
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
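# Illustrative addition: with the lazy module installed in `sys.modules`, submodules
# are only imported on first attribute access, e.g.
#
# >>> from transformers import FocalNetConfig   # triggers configuration_focalnet import
# >>> config = FocalNetConfig()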
| 434
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer, adapted from XLMRobertaTokenizer and based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
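# --- Usage sketch (illustrative addition) ---
# Load the published checkpoint and tokenize a Vietnamese sentence. Requires network
# access to download "vinai/bartpho-syllable" and the `sentencepiece` package.
if __name__ == "__main__":
    tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
    print(tokenizer.tokenize("Chúng tôi là những nghiên cứu viên."))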
| 434
| 1
|
"""Tests for the diffusers `UNet2DModel`."""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
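# --- Smoke test (illustrative addition, not part of the original test module) ---
# Builds a randomly initialized UNet2DModel with the small config used above and runs
# a single forward pass; no hub checkpoint is required.
if __name__ == "__main__":
    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    sample = torch.randn(1, 3, 32, 32)
    out = model(sample, timestep=torch.tensor([10])).sample
    print(out.shape)  # torch.Size([1, 3, 32, 32])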
| 706
|
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a Speech2Text feature extractor (Kaldi-style fbank features plus utterance-level CMVN)."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Get mel-filter bank features using TorchAudio's Kaldi-compliance routines."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
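# --- Usage sketch (illustrative addition) ---
# Extract fbank features from one second of random audio; requires `torchaudio`.
# The defaults above match the Speech2Text configuration (80 mel bins, 16 kHz).
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    audio = np.random.randn(16000).astype(np.float32)
    inputs = extractor(audio, sampling_rate=16000, return_tensors="np")
    print(inputs["input_features"].shape)  # (1, num_frames, 80)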
| 408
| 0
|
"""Testing suite for the PyTorch VideoMAE model."""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            mask = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = mask.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    """Download and load the test video as a list of frames."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
    @slow
    def test_inference_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
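# A minimal sketch (illustrative, not part of the test suite above) of how the
# boolean mask consumed by VideoMAEForPreTraining can be built by hand instead
# of downloading one. The sizes assume 16 frames, tubelet size 2 and 16x16
# patches on 224x224 video, i.e. 8 * 14 * 14 = 1568 patch tokens; adjust the
# numbers and the ~90% masking ratio to your own configuration.
if __name__ == "__main__":
    num_patches_per_frame = (224 // 16) ** 2  # 196 patches per frame pair
    seq_length = (16 // 2) * num_patches_per_frame  # 1568 tokens for the clip
    bool_masked_pos = torch.zeros(seq_length, dtype=torch.bool)
    picks = torch.randperm(seq_length)[: int(0.9 * seq_length)]
    bool_masked_pos[picks] = True  # mask roughly 90% of the patch tokens
    bool_masked_pos = bool_masked_pos.unsqueeze(0)  # batch dimension -> (1, 1568)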
| 56
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
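# Reference hand-run of the toy merge table above: with the ranked merges
# "l o", "lo w" and "e r</w>", the word "lower" is segmented as
# l o w e r</w> -> lo w e r</w> -> low e r</w> -> low er</w>, i.e.
# ["low", "er</w>"], whose vocab indices are 14 and 15 -- exactly what
# test_full_tokenizer asserts. The greedy loop below is an illustrative
# sketch of that process, not BioGPT's actual BPE implementation.
def _toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:  # merges are applied in priority order
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols
assert _toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]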
| 316
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
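# Illustrative sanity check for the shape assertions above: RegNet's stem
# halves the spatial resolution once before the stages, so with image_size=32
# the first recorded feature map is 16x16, and (for the stride-2 stages used
# in this tester) each subsequent stage halves it again. Toy arithmetic only.
if __name__ == "__main__":
    image_size = 32
    sizes = [image_size // 2]  # after the embedder/stem
    for _ in range(3):
        sizes.append(sizes[-1] // 2)
    print(sizes)  # [16, 8, 4, 2]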
| 717
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """Create NUMBER_IMAGES mosaic images and matching YOLO label files."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO label files and collect image paths with corner-format boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Assemble four images into one mosaic and rescale their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
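# Worked example of the label round-trip above: YOLO files store normalized
# (x_center, y_center, width, height); get_dataset converts them to corner
# coordinates and main() converts back after the mosaic is assembled.
if __name__ == "__main__":
    import math
    x_center, y_center, width, height = 0.5, 0.5, 0.2, 0.4
    xmin, ymin = x_center - width / 2, y_center - height / 2
    xmax, ymax = x_center + width / 2, y_center + height / 2
    assert math.isclose(xmin, 0.4) and math.isclose(ymax, 0.7)
    # the inverse transform recovers the original values
    assert math.isclose((xmin + xmax) / 2, x_center)
    assert math.isclose(xmax - xmin, width)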
| 366
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
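# The `chunks` helper imported from utils above is assumed to split a list
# into batch-sized pieces; a minimal stand-in with that behavior would be:
def _chunks_sketch(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]
assert list(_chunks_sketch([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]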
| 287
|
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
    import doctest
    doctest.testmod()
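    # Worked example: for bit_count = 2 the recursion builds ["0", "1"] ->
    # ["00", "01", "11", "10"], i.e. [0, 1, 3, 2]; consecutive values differ
    # in exactly one bit, which is the defining property of a Gray code.
    demo = gray_code(2)
    assert demo == [0, 1, 3, 2]
    assert all(bin(a ^ b).count("1") == 1 for a, b in zip(demo, demo[1:]))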
| 287
| 1
|
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the ``max_n``-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
    print(f"{solution() = }")
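    # Hand-check of the recurrence: e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] and
    # the convergent numerators follow h_i = a_i * h_(i-1) + h_(i-2):
    # 2, 3, 8, 11, 19, 87, 106, 193, 1264, 1457, ... The 10th is 1457 and
    # 1 + 4 + 5 + 7 = 17, the documented Project Euler 65 check value.
    assert solution(10) == 17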
| 719
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()
    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
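# Illustrative check of the qkv split performed in convert_state_dict above:
# timm fuses query/key/value into one (3*dim, dim) matrix, and the row slices
# [:dim], [dim:2*dim] and [-dim:] recover the three projections. Toy size
# only; not tied to any real checkpoint.
if __name__ == "__main__":
    dim = 4
    fused = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)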
| 119
| 0
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """Weighted undirected graph."""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> "Graph":
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edgea: int
    edgeb: int
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjaceny_matrix = [line.split(",") for line in data]
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])
    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
    print(f"{solution() = }")
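    # Tiny worked example of the saving computed above: a triangle with edge
    # weights 1, 2 and 3 has total weight 6; its minimum spanning tree keeps
    # the two cheapest edges (total 3), so the maximum saving is 6 - 3 = 3.
    demo_graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    demo_mst = demo_graph.prims_algorithm()
    assert sum(demo_graph.edges.values()) - sum(demo_mst.edges.values()) == 3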
| 635
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
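# Optional, illustrative: seed the sampler for reproducible images. The
# `generator` argument is a standard StableDiffusionPipeline parameter; the
# seed value is arbitrary.
generator = torch.Generator("cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("dog-bucket-seeded.png")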
| 359
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None
    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy
    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")
    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0
    @property
    def is_gpu(self):
        return self.n_gpu > 0
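# Minimal usage sketch (illustrative): `models`, `batch_sizes` and
# `sequence_lengths` are assumed to be fields of the base BenchmarkArguments
# class, as in the PyTorch counterpart; the values are arbitrary examples.
if __name__ == "__main__":
    benchmark_args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128], eager_mode=True
    )
    print(benchmark_args.is_gpu, benchmark_args.n_gpu)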
| 548
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
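# Worked example for the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples by 5 * 2**6 = 320,
# i.e. one encoder frame per 320 waveform samples (20 ms of audio at 16 kHz).
if __name__ == "__main__":
    config = Data2VecAudioConfig()
    assert config.inputs_to_logits_ratio == 320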
| 548
| 1
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*x, **y):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate a serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""
    infos: dict
class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""
    text: str
class ServeForwardResult(BaseModel):
    """Forward result model."""
    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)
    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
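# Example session (illustrative; flag and field names as registered above):
#   transformers-cli serve --task text-classification --host localhost --port 8888
# then POST JSON such as {"text_input": "hello world", "return_ids": true} to
# /tokenize, or an input payload to /forward.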
| 286
|
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
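    # Sanity check: a GHZ state only ever collapses to all-zeros or all-ones,
    # so with 1000 shots the counts look roughly like {'000': 500, '111': 500}
    # and never contain any mixed bitstring.
    counts = quantum_entanglement(3)
    assert set(counts) <= {"000", "111"}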
| 286
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1_5_3_6,
"junnyu/roformer_chinese_base": 1_5_3_6,
"junnyu/roformer_chinese_char_small": 5_1_2,
"junnyu/roformer_chinese_char_base": 5_1_2,
"junnyu/roformer_small_discriminator": 1_2_8,
"junnyu/roformer_small_generator": 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        # make sure the custom Jieba pre-tokenizer is set on the backend tokenizer
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # the custom pre-tokenizer is a Python object and cannot be pickled,
        # so swap in the serializable BertPreTokenizer for pickling
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # the custom Jieba pre-tokenizer cannot be serialized into tokenizer.json,
        # so fall back to BertPreTokenizer while saving
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
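

# --- Hedged usage sketch (added, not part of the original module) ------------
# Shows the effect of the custom Jieba pre-tokenizer installed in __init__
# above, and why save_pretrained() swaps BertPreTokenizer back in: the Jieba
# pre-tokenizer is a Python object that cannot be serialized into
# tokenizer.json. Running this requires network access to download
# "junnyu/roformer_chinese_base"; the sentence is an arbitrary example.
if __name__ == "__main__":
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tokenizer.tokenize("今天天气非常好"))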
| 718
|
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether `next_ver` can be appended to the partial cycle in `path`."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursively extend `path` position by position, backtracking on dead ends."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path, otherwise return empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
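

# --- Hedged usage sketch (added, not part of the original algorithm file) ----
# The 4-vertex ring below is a hypothetical adjacency matrix chosen only to
# illustrate the API; hamilton_cycle returns the cycle with the start vertex
# repeated at the end, or [] when no Hamiltonian cycle exists.
if __name__ == "__main__":
    ring = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(hamilton_cycle(ring))  # [0, 1, 2, 3, 0]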
| 625
| 0
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d <= `digit` for which 1/d contains the longest recurring
    cycle in its decimal fraction part (Project Euler problem 26).

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # a remainder repeated: the recurring cycle is complete
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
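

# --- Hedged illustration (added) ---------------------------------------------
# Why counting remainders finds the cycle: when long-dividing 1 by d, the
# decimal digits repeat as soon as a remainder repeats. For d = 7 the
# remainders run 1, 3, 2, 6, 4, 5 and then 1 again, giving the length-6 block
# 0.(142857). The count below equals the cycle length whenever gcd(d, 10) == 1.
def recurring_remainder_count(d: int, numerator: int = 1) -> int:
    seen: list[int] = []
    r = numerator % d
    while r and r not in seen:
        seen.append(r)
        r = r * 10 % d
    return len(seen) if r else 0  # 0 for terminating decimals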
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 443
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample_1 = self.dummy_sample_deter
        sample_2 = self.dummy_sample_deter + 0.1
        sample_3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_1.shape[0]
        samples = torch.stack([sample_1, sample_2, sample_3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
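

# --- Hedged reference sketch (added; not part of the diffusers test suite) ---
# The deterministic (eta = 0) DDIM update that `full_loop` above drives,
# written out in plain NumPy for a single step. `alpha_prod_t` and
# `alpha_prod_t_prev` stand for the cumulative products of alphas at the
# current and previous timesteps; `eps` is the model's noise prediction.
import numpy as np


def ddim_step_eta0(sample, eps, alpha_prod_t, alpha_prod_t_prev):
    # 1. predict x_0 from the noise estimate
    pred_x0 = (sample - (1 - alpha_prod_t) ** 0.5 * eps) / alpha_prod_t**0.5
    # 2. re-noise x_0 toward the previous timestep (no random term when eta=0)
    return alpha_prod_t_prev**0.5 * pred_x0 + (1 - alpha_prod_t_prev) ** 0.5 * eps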
| 443
| 1
|
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or above `factor * value` (below it if desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
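

# --- Hedged usage sketch (added) ----------------------------------------------
# Find the first prime at or above a starting value, or search downward with
# desc=True; the starting value 14 is an arbitrary example.
if __name__ == "__main__":
    print(next_prime(14))             # 17
    print(next_prime(14, desc=True))  # 13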
| 708
|
"""Testing suite for the PyTorch FocalNet model."""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # the original used assertTrue with two arguments here, which always passes;
        # assertEqual is what was intended
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
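

# --- Hedged illustration (added) ----------------------------------------------
# How the expected shape asserted in create_and_check_model comes about for the
# tester defaults above (image_size=32, patch_size=2, embed_dim=16,
# depths=[1, 2, 1]): each of the len(depths) - 1 downsampling stages merges
# 2x2 patches, quartering the sequence length and doubling the hidden width.
if __name__ == "__main__":
    num_patches = (32 // 2) ** 2                       # 256 tokens after patch embedding
    expected_seq_len = num_patches // (4 ** (3 - 1))   # 16
    expected_dim = 16 * 2 ** (3 - 1)                   # 64
    print(expected_seq_len, expected_dim)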
| 460
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
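

# --- Hedged usage sketch (added) ------------------------------------------------
# Instantiating the default config and reading the attributes derived in
# __init__ above.
if __name__ == "__main__":
    config = NatConfig()
    print(config.num_layers)   # 4, from len(depths)
    print(config.hidden_size)  # 512 = 64 * 2 ** (4 - 1)
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']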
| 21
|
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
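

# --- Hedged sanity check (added) ------------------------------------------------
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)). For references
# [1, 1, 0, 0] and predictions [1, 0, 0, 0]: TP=1, TN=2, FP=0, FN=1, so
# MCC = 2 / sqrt(12) ~= 0.58, matching what sklearn's matthews_corrcoef returns.
if __name__ == "__main__":
    print(round(matthews_corrcoef([1, 1, 0, 0], [1, 0, 0, 0]), 2))  # 0.58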
| 21
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: the class is restored here under a generic name; the original class
# name is not recoverable from this file.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
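

# --- Hedged usage sketch (added; assumes the generic `ImageProcessor` name
# restored above, since the original class name was lost) ----------------------
# A random RGB image is resized so its short side is 256, center-cropped to
# 224x224, rescaled to [0, 1] and normalized, yielding pixel_values of shape
# (1, 3, 224, 224).
if __name__ == "__main__":
    from PIL import Image

    processor = ImageProcessor()
    image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
    batch = processor.preprocess(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)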
| 409
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the tokenizers library for this model
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the tokenizers library for this model
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 409
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : str = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(__UpperCamelCase )
def lowercase__( ):
lowercase_ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
np.random.seed(2 )
lowercase_ : Any = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
lowercase_ : Tuple = self.default_image_processor
lowercase_ : List[str] = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__UpperCamelCase ,return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase_ : List[Any] = ViTMAEConfig()
lowercase_ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase_ : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase_ : str = model(**__UpperCamelCase ,noise=__UpperCamelCase )
# verify the logits
lowercase_ : Union[str, Any] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape ,__UpperCamelCase )
lowercase_ : List[str] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,__UpperCamelCase ,atol=1e-4 )
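# Added illustration (a hedged sketch, not part of the original test file):
# ViTMAE samples a random patch mask on every forward pass, which is why the
# tests above thread an explicit `noise` array of shape
# (batch_size, num_patches) through each call to keep two runs comparable.
# The sizes below are the ViT-Base defaults assumed for illustration.
import numpy as np

batch_size, image_size, patch_size = 2, 224, 16
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches
noise = np.random.uniform(size=(batch_size, num_patches))
assert noise.shape == (2, 196)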
| 425
|
_snake_case : Optional[int] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_snake_case : Dict = ["a", "b", "c", "d", "e"]
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[str] = start
# add current to visited
visited.append(__lowerCamelCase )
__snake_case : List[Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__snake_case : Tuple = topological_sort(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCamelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
for vertice in vertices:
if vertice not in visited:
__snake_case : int = topological_sort(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# return sort
return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
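# Added sanity check (hedged, not part of the original file): in the
# DFS-based sort above a vertex is appended only after all of its neighbors,
# so every neighbor must appear earlier in the returned list than the vertex
# that points to it.
def is_valid_topological_order(order, graph):
    position = {vertex: i for i, vertex in enumerate(order)}
    return all(position[v] < position[u] for u in graph for v in graph[u])

assert is_valid_topological_order(topological_sort("a", [], []), edges)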
| 81
| 0
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
UpperCAmelCase = input("""Enter image url: """).strip()
print(F'''Downloading image from {url} ...''')
UpperCAmelCase = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
UpperCAmelCase = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
UpperCAmelCase = requests.get(image_url).content
UpperCAmelCase = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
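# Added offline check (hedged): the og:image lookup used above, exercised on a
# small hand-written HTML snippet so it can be verified without network
# access. The URL in the snippet is a made-up example, not one from any run.
from bs4 import BeautifulSoup

sample_html = '<html><head><meta property="og:image" content="https://example.com/cat.jpg"/></head></html>'
sample_soup = BeautifulSoup(sample_html, "html.parser")
assert sample_soup.find("meta", {"property": "og:image"})["content"] == "https://example.com/cat.jpg"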
| 713
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
UpperCAmelCase = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
UpperCAmelCase = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
UpperCAmelCase = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : str=None , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Dict=False ) -> str:
if rouge_types is None:
_UpperCamelCase = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
_UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=__UpperCamelCase , use_stemmer=__UpperCamelCase )
if use_aggregator:
_UpperCamelCase = scoring.BootstrapAggregator()
else:
_UpperCamelCase = []
for ref, pred in zip(__UpperCamelCase , __UpperCamelCase ):
_UpperCamelCase = scorer.score(__UpperCamelCase , __UpperCamelCase )
if use_aggregator:
aggregator.add_scores(__UpperCamelCase )
else:
scores.append(__UpperCamelCase )
if use_aggregator:
_UpperCamelCase = aggregator.aggregate()
else:
_UpperCamelCase = {}
for key in scores[0]:
_UpperCamelCase = [score[key] for score in scores]
return result
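# Added usage sketch (hedged): the `_compute` method above is a thin wrapper
# around `rouge_score.rouge_scorer`, constructed with the same keyword
# arguments (`rouge_types`, `use_stemmer`); the direct call looks like this.
from rouge_score import rouge_scorer

direct_scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
direct_score = direct_scorer.score("hello there", "hello there")
# each entry is a Score(precision, recall, fmeasure) named tuple
assert direct_score["rouge1"].fmeasure == 1.0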
| 342
| 0
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A_ = logging.get_logger(__name__)
A_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
A_ = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
A_ = {
"facebook/blenderbot_small-90M": 5_12,
}
class _snake_case ( _a ):
_A : Union[str, Any] = VOCAB_FILES_NAMES
_A : Any = PRETRAINED_VOCAB_FILES_MAP
_A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Tuple = BlenderbotSmallTokenizer
def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : int="<|endoftext|>" ,SCREAMING_SNAKE_CASE__ : Tuple="<|endoftext|>" ,SCREAMING_SNAKE_CASE__ : int="<|endoftext|>" ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=SCREAMING_SNAKE_CASE__ ,merges=SCREAMING_SNAKE_CASE__ ,add_prefix_space=SCREAMING_SNAKE_CASE__ ,trim_offsets=SCREAMING_SNAKE_CASE__ ,) ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
SCREAMING_SNAKE_CASE:Optional[Any] = add_prefix_space
    def build_inputs_with_special_tokens( self ,token_ids_a : List[int] ,token_ids_b : Optional[List[int]] = None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self ,token_ids_a : List[int] ,token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
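# Added illustration (hedged): the special-token layout produced by
# `build_inputs_with_special_tokens` above, sketched with stand-in ids
# (bos=0, eos=2 are placeholders here, not the real vocabulary ids).
def sketch_build_inputs(token_ids_a, token_ids_b=None, bos=0, eos=2):
    output = [bos] + token_ids_a + [eos]
    if token_ids_b is None:
        return output
    return output + [eos] + token_ids_b + [eos]

assert sketch_build_inputs([5, 6]) == [0, 5, 6, 2]
assert sketch_build_inputs([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]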
| 143
|
'''simple docstring'''
def solution( power = 1000 ):
    num = 2**power
    list_num = list(str(num ) )
    sum_of_num = 0
    for digit in list_num:
        sum_of_num += int(digit )
    return sum_of_num
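# Added sanity check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, the classic
# small case for this digit-sum routine.
assert solution(15) == 26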
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 143
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase__ : Any = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
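# Added sketch (hedged): a toy version of the lazy-import pattern above. The
# real `_LazyModule` lives in `transformers.utils`; this miniature only shows
# the idea, namely that attribute access triggers the submodule import on
# first use instead of at package import time.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # resolve the attribute lazily from whichever submodule declares it
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)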
| 416
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Optional[int] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = AlbertTokenizer
SCREAMING_SNAKE_CASE_ : Dict = AlbertTokenizerFast
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : int = True
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase : int = AlbertTokenizer(UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self : Union[str, Any] , UpperCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = '''this is a test'''
_UpperCAmelCase : Union[str, Any] = '''this is a test'''
return input_text, output_text
def a_ ( self : Tuple ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Any = '''<pad>'''
_UpperCAmelCase : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def a_ ( self : int ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(UpperCAmelCase_ ) , 30000 )
def a_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def a_ ( self : Any ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : int = self.get_rust_tokenizer()
_UpperCAmelCase : Dict = '''I was born in 92000, and this is falsé.'''
_UpperCAmelCase : Tuple = tokenizer.tokenize(UpperCAmelCase_ )
_UpperCAmelCase : Tuple = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Dict = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
_UpperCAmelCase : str = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
_UpperCAmelCase : Any = tokenizer.encode(UpperCAmelCase_ )
_UpperCAmelCase : List[str] = rust_tokenizer.encode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def a_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = AlbertTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
_UpperCAmelCase : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [48, 25, 21, 1289] )
_UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
_UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def a_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = AlbertTokenizer(UpperCAmelCase_ )
_UpperCAmelCase : Optional[int] = tokenizer.encode('''sequence builders''' )
_UpperCAmelCase : Optional[Any] = tokenizer.encode('''multi-sequence build''' )
_UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 416
| 1
|
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Any:
UpperCamelCase :Tuple = {}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ) -> str:
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCamelCase :Tuple = [[w, v]]
if not self.graph.get(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Optional[int] = []
def UpperCAmelCase ( self ) -> Any:
return list(self.graph )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> Optional[int]:
if s == d:
return []
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :Optional[Any] = []
if s == -2:
UpperCamelCase :Optional[int] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Tuple = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Union[str, Any] = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-1 ) -> Any:
if c == -1:
UpperCamelCase :Optional[Any] = floor(random() * 1_0000 ) + 10
for i in range(SCREAMING_SNAKE_CASE_ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase :Any = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> List[Any]:
UpperCamelCase :Tuple = deque()
UpperCamelCase :List[str] = []
if s == -2:
UpperCamelCase :Tuple = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
while d:
UpperCamelCase :Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str:
return len(self.graph[u] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> Dict:
UpperCamelCase :Optional[Any] = []
UpperCamelCase :Dict = []
if s == -2:
UpperCamelCase :int = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = s
UpperCamelCase :List[str] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Union[str, Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Dict = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return sorted_nodes
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Optional[Any] = []
UpperCamelCase :Dict = []
UpperCamelCase :Dict = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = -2
UpperCamelCase :List[Any] = []
UpperCamelCase :int = s
UpperCamelCase :Dict = False
UpperCamelCase :Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :Optional[int] = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Optional[Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Union[str, Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = s
UpperCamelCase :int = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[str] = []
UpperCamelCase :List[Any] = []
UpperCamelCase :Tuple = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = -2
UpperCamelCase :Tuple = []
UpperCamelCase :int = s
UpperCamelCase :str = False
UpperCamelCase :List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :Dict = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Optional[int] = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = s
UpperCamelCase :Union[str, Any] = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> Union[str, Any]:
UpperCamelCase :Tuple = time()
self.dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = time()
return end - begin
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> List[str]:
UpperCamelCase :str = time()
self.bfs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = time()
return end - begin
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> int:
UpperCamelCase :Optional[Any] = {}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ) -> str:
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCamelCase :Any = [[w, v]]
# add the other way
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
UpperCamelCase :Optional[int] = [[w, u]]
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE_ )
# the other way round
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> str:
if s == d:
return []
UpperCamelCase :List[str] = []
UpperCamelCase :int = []
if s == -2:
UpperCamelCase :Tuple = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Dict = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-1 ) -> Any:
if c == -1:
UpperCamelCase :Dict = floor(random() * 1_0000 ) + 10
for i in range(SCREAMING_SNAKE_CASE_ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase :List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> List[Any]:
UpperCamelCase :str = deque()
UpperCamelCase :Dict = []
if s == -2:
UpperCamelCase :Optional[int] = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
while d:
UpperCamelCase :str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str:
return len(self.graph[u] )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :str = []
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :List[str] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = -2
UpperCamelCase :Optional[Any] = []
UpperCamelCase :List[str] = s
UpperCamelCase :int = False
UpperCamelCase :List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :str = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Optional[int] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Dict = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = s
UpperCamelCase :str = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :int = []
UpperCamelCase :Tuple = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = -2
UpperCamelCase :List[str] = []
UpperCamelCase :str = s
UpperCamelCase :Optional[int] = False
UpperCamelCase :Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :int = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :List[str] = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :str = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Dict = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = s
UpperCamelCase :int = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
def UpperCAmelCase ( self ) -> Dict:
return list(self.graph )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> Optional[Any]:
UpperCamelCase :int = time()
self.dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = time()
return end - begin
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> int:
UpperCamelCase :Union[str, Any] = time()
self.bfs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = time()
return end - begin
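# Added sketch (hedged): both Graph classes above store edges as
# {u: [[weight, v], ...]}; the compact recursive DFS below over that same
# shape is a self-contained illustration and does not call the (name-mangled)
# methods in the classes themselves.
def sketch_dfs(graph, start, visited=None):
    visited = [] if visited is None else visited
    visited.append(start)
    for _weight, neighbor in graph.get(start, []):
        if neighbor not in visited:
            sketch_dfs(graph, neighbor, visited)
    return visited

assert sketch_dfs({0: [[1, 1], [1, 2]], 1: [[1, 2]], 2: []}, 0) == [0, 1, 2]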
| 658
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Tuple = ["""input_values""", """attention_mask"""]
def __init__( self , snake_case = 1 , snake_case = 16_000 , snake_case = 0.0 , snake_case = False , snake_case = 80 , snake_case = 16 , snake_case = 64 , snake_case = "hann_window" , snake_case = 1.0 , snake_case = 80 , snake_case = 7_600 , snake_case = 1E-10 , snake_case = 2 , snake_case = True , **snake_case , ) -> Dict:
"""simple docstring"""
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
a__ : Any = do_normalize
a__ : List[str] = return_attention_mask
a__ : List[Any] = num_mel_bins
a__ : List[str] = hop_length
a__ : int = win_length
a__ : List[Any] = win_function
a__ : List[str] = frame_signal_scale
a__ : List[Any] = fmin
a__ : Optional[Any] = fmax
a__ : Union[str, Any] = mel_floor
a__ : Union[str, Any] = reduction_factor
a__ : List[str] = win_length * sampling_rate // 1_000
a__ : List[Any] = hop_length * sampling_rate // 1_000
a__ : List[Any] = optimal_fft_length(self.sample_size )
a__ : Dict = (self.n_fft // 2) + 1
a__ : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
a__ : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _snake_case ( snake_case , snake_case , snake_case = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
a__ : Tuple = np.array(snake_case , np.intaa )
a__ : List[str] = []
for vector, length in zip(snake_case , attention_mask.sum(-1 ) ):
a__ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
a__ : List[str] = padding_value
normed_input_values.append(snake_case )
else:
a__ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _snake_case ( self , snake_case , ) -> np.ndarray:
"""simple docstring"""
a__ : str = spectrogram(
snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , snake_case = None , snake_case = None , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
a__ : Dict = self._process_audio(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
else:
a__ : Optional[int] = None
if audio_target is not None:
a__ : List[Any] = self._process_audio(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
if inputs is None:
return inputs_target
else:
a__ : Tuple = inputs_target["input_values"]
a__ : Tuple = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
a__ : Tuple = decoder_attention_mask
return inputs
def _snake_case ( self , snake_case , snake_case = False , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
"""simple docstring"""
a__ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
a__ : List[Any] = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a__ : int = [np.asarray(snake_case , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
a__ : Any = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
a__ : List[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ : Union[str, Any] = [speech]
# needed to make pad() work on spectrogram inputs
a__ : Optional[Any] = self.feature_size
# convert into correct format for padding
if is_target:
a__ : List[str] = [self._extract_mel_features(snake_case ) for waveform in speech]
a__ : Optional[Any] = BatchFeature({"input_values": features} )
a__ : str = self.num_mel_bins
else:
a__ : int = BatchFeature({"input_values": speech} )
a__ : int = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
a__ : Any = feature_size_hack
# convert input values to correct format
a__ : Tuple = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a__ : int = [np.asarray(snake_case , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(snake_case , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a__ : Union[str, Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a__ : Optional[int] = input_values.astype(np.floataa )
# convert attention_mask to correct format
a__ : Optional[int] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
a__ : Tuple = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a__ : Any = (
attention_mask
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a__ : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=snake_case , padding_value=self.padding_value )
if return_tensors is not None:
a__ : int = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
def _snake_case ( self ) -> Dict[str, Any]:
"""simple docstring"""
a__ : int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
a__ : str = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
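# Added check (hedged): the per-sequence normalization that
# `zero_mean_unit_var_norm` above applies, shown on one unpadded vector.
import numpy as np

vec = np.array([1.0, 2.0, 3.0, 4.0])
normed = (vec - vec.mean()) / np.sqrt(vec.var() + 1e-7)
assert abs(normed.mean()) < 1e-6 and abs(normed.std() - 1.0) < 1e-3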
| 112
| 0
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
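# Added worked example: with capacity 5 the best pick is the items of weight
# 2 and 3 (values 30 and 50), for a total value of 80.
assert knapsack([2, 3, 4], [30, 50, 10], 3, 5, 0) == 80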
| 83
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
                _A = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # HWC -> NCHW
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
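# Added illustration (hedged): the box rescaling performed just above, where
# x coordinates (columns 0 and 2) are multiplied by the x scale and
# y coordinates (columns 1 and 3) by the y scale.
import torch

demo_boxes = torch.tensor([[10.0, 20.0, 30.0, 40.0]])
demo_scale_yx = torch.tensor([[2.0, 0.5]])  # (scale_y, scale_x)
demo_boxes[:, 0::2] *= demo_scale_yx[:, 1]
demo_boxes[:, 1::2] *= demo_scale_yx[:, 0]
assert demo_boxes.tolist() == [[5.0, 40.0, 15.0, 80.0]]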
| 83
| 1
|
'''simple docstring'''
from __future__ import annotations
def p_series( nth_term : int | float | str , power : int | float | str )-> list[str]:
    """simple docstring"""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(f"1 / {pow(temp + 1 , int(power ) )}" if series else "1" )
    return series
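# Added example: the first four terms of the P-series with p = 2.
assert p_series(4, 2) == ["1", "1 / 4", "1 / 9", "1 / 16"]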
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 138
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
snake_case_ : Optional[int] = '''scheduler_config.json'''
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
_lowerCAmelCase = 5
@dataclass
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = 42
class A_ :
'''simple docstring'''
_lowerCAmelCase = SCHEDULER_CONFIG_NAME
_lowerCAmelCase = ["""dtype"""]
_lowerCAmelCase = []
_lowerCAmelCase = True
@classmethod
def a ( cls , A_ = None , A_ = None , A_=False , **A_ , ):
_UpperCamelCase , _UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
_UpperCamelCase , _UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , "create_state" ) and getattr(A_ , "has_state" , A_ ):
_UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def a ( self , A_ , A_ = False , **A_ ):
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def a ( self ):
return self._get_compatibles()
@classmethod
def a ( cls ):
_UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
_UpperCamelCase = importlib.import_module(__name__.split("." )[0] )
_UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def lowercase__( _UpperCamelCase : jnp.ndarray , _UpperCamelCase : Tuple[int] )-> jnp.ndarray:
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def lowercase__( _UpperCamelCase : int , _UpperCamelCase : Tuple=0.999 , _UpperCamelCase : Any=jnp.floataa )-> jnp.ndarray:
"""simple docstring"""
def alpha_bar(_UpperCamelCase : Any ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
_UpperCamelCase = []
for i in range(_UpperCamelCase ):
_UpperCamelCase = i / num_diffusion_timesteps
_UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
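# Added check (hedged): the alpha_bar used above starts near 1 and decays to
# 0 at t = 1, which is what pushes the final beta toward the 0.999 cap.
import math as _math

assert _math.cos((0.0 + 0.008) / 1.008 * _math.pi / 2) ** 2 > 0.999
assert _math.cos((1.0 + 0.008) / 1.008 * _math.pi / 2) ** 2 < 1e-9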
@flax.struct.dataclass
class A_ :
'''simple docstring'''
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
@classmethod
def a ( cls , A_ ):
_UpperCamelCase = scheduler.config
if config.trained_betas is not None:
_UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
_UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
_UpperCamelCase = 1.0 - betas
_UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def lowercase__( _UpperCamelCase : CommonSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray )-> List[Any]:
"""simple docstring"""
_UpperCamelCase = state.alphas_cumprod
_UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
_UpperCamelCase = sqrt_alpha_prod.flatten()
_UpperCamelCase = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
_UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
_UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
_UpperCamelCase = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def lowercase__( _UpperCamelCase : CommonSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray )-> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def lowercase__( _UpperCamelCase : CommonSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray )-> Any:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 138
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("""Making key files...""" )
    make_key_files("""rsa""" , 1_024 )
    print("""Key files generation successful.""" )
def generate_key( key_size : int ) -> tuple[tuple[int, int], tuple[int, int]]:
    print("""Generating prime p...""" )
    p = rabinMiller.generate_large_prime(key_size )
    print("""Generating prime q...""" )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print("""Calculating d that is mod inverse of e...""" )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files( name : str , key_size : int ) -> None:
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print("""\nWARNING:""" )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            """Use a different name or delete these files and re-run this program.""" )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' , """w""" ) as out_file:
        out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' , """w""" ) as out_file:
        out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
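# Added sanity sketch for the RSA math above, on toy primes (never this small
# in practice): with n = p*q and d the inverse of e mod (p-1)(q-1),
# encrypt-then-decrypt is the identity on messages below n.
toy_p, toy_q, toy_e = 61, 53, 17
toy_n, toy_phi = toy_p * toy_q, (toy_p - 1) * (toy_q - 1)
toy_d = pow(toy_e, -1, toy_phi)  # modular inverse (Python 3.8+)
assert pow(pow(42, toy_e, toy_n), toy_d, toy_n) == 42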
| 198
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
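# Minimal usage sketch (an editorial addition; the checkpoint name is an
# assumption -- any Chinese-CLIP checkpoint that ships a processor config
# should behave the same way):
#
#     from PIL import Image
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
#     # `inputs` now carries `input_ids`, `attention_mask` and `pixel_values`.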
| 198
| 1
|
'''simple docstring'''
class Graph:
    def __init__(self) -> None:
        # adjacency list keyed by vertex
        self.vertex = {}

    # for printing the graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding an edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
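# For comparison, an iterative traversal over the same adjacency dict
# (an editorial addition, not part of the original module); it avoids
# Python's recursion limit on deep graphs:
#
#     def dfs_iterative(graph: Graph, start_vertex: int) -> None:
#         visited, stack = set(), [start_vertex]
#         while stack:
#             v = stack.pop()
#             if v not in visited:
#                 visited.add(v)
#                 print(v, end=" ")
#                 # push neighbours in reverse so they pop in insertion order
#                 stack.extend(reversed(graph.vertex.get(v, [])))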
| 24
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
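# Running note (an editorial addition): these tests assume a local Spark
# session and are typically driven with pytest; the path below is an
# assumption about the repository layout:
#
#     python -m pytest tests/packaged_modules/test_spark.py -x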
| 24
| 1
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
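# Context note (an editorial addition): the expected logits shape (1, 16)
# reflects the 16 RVL-CDIP document categories (letter, form, email, ...)
# that the fine-tuned DiT classification head predicts over.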
| 701
|
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
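# Cross-check sketch (an editorial addition): the standard library produces
# the same combinations, which makes a handy test oracle:
#
#     from itertools import combinations
#     assert list(combinations([10, 20, 30, 40, 50], 3))[0] == (10, 20, 30)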
| 260
| 0
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
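# Usage sketch (an editorial addition; the checkpoint/YAML paths are the
# defaults assumed above and may differ in practice):
#
#     model = load_vqgan("cpu")                   # loads config + weights
#     # xrec = reconstruct_with_vqgan(x, model)   # x: (B, 3, H, W) in [-1, 1]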
| 2
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ log spam
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 517
| 0
|
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (non-preemptive SJF)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 341
|
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
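# Quick sanity check (an editorial addition): one mole of an ideal gas at
# 273.15 K in 0.0224 m^3 should sit near standard pressure:
#
#     pressure_of_gas_system(1, 273.15, 0.0224)   # ~1.01e5 Pa, i.e. about 1 atm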
| 341
| 1
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` lines."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 61
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort input_list[low:mid] and input_list[mid:high + 1] and merge them back in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
UpperCamelCase = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
UpperCamelCase = []
else:
UpperCamelCase = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
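# Usage sketch (an editorial addition): the routine is a bottom-up merge sort,
# O(n log n) comparisons with O(n) auxiliary space per merge:
#
#     assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]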
| 61
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    """simple docstring"""

    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 703
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
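# Interpretation note (an editorial addition): the QFT of the all-zero
# 3-qubit register is a uniform superposition, so the 10000 shots should be
# spread roughly evenly over all 8 basis states (~1250 counts each).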
| 608
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = "▁"
A_ = {"vocab_file": "sentencepiece.bpe.model"}
A_ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
A_ = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
A_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = VOCAB_FILES_NAMES
__lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__lowerCamelCase : List[int] = []
__lowerCamelCase : List[int] = []
def __init__( self: int , UpperCamelCase_: Dict , UpperCamelCase_: Any="<s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Tuple="</s>" , UpperCamelCase_: int="<s>" , UpperCamelCase_: Union[str, Any]="<unk>" , UpperCamelCase_: Union[str, Any]="<pad>" , UpperCamelCase_: int="<mask>" , UpperCamelCase_: str=None , UpperCamelCase_: Any=None , UpperCamelCase_: int=None , UpperCamelCase_: Optional[Dict[str, Any]] = None , UpperCamelCase_: str=None , UpperCamelCase_: Optional[Any]=False , **UpperCamelCase_: Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase_ =AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
UpperCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase_ =legacy_behaviour
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
UpperCamelCase_ =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase_ ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase_ =1
UpperCamelCase_ =len(self.sp_model )
UpperCamelCase_ ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ )
}
UpperCamelCase_ ={v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase_ =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase_ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase_ =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase_ =src_lang if src_lang is not None else "eng_Latn"
UpperCamelCase_ =self.lang_code_to_id[self._src_lang]
UpperCamelCase_ =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self: List[Any] ):
UpperCamelCase_ =self.__dict__.copy()
UpperCamelCase_ =None
UpperCamelCase_ =self.sp_model.serialized_model_proto()
return state
def __setstate__( self: int , UpperCamelCase_: List[str] ):
UpperCamelCase_ =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ ={}
UpperCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCamelCase__ ( self: List[Any] ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase__ ( self: Any ):
return self._src_lang
@src_lang.setter
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
UpperCamelCase_ =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
UpperCamelCase_ =[1] * len(self.prefix_tokens )
UpperCamelCase_ =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase_ )) + ([0] * len(UpperCamelCase_ )) + suffix_ones
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
UpperCamelCase_ =[self.sep_token_id]
UpperCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[str] , UpperCamelCase_: Optional[str] , **UpperCamelCase_: int ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCamelCase_ =src_lang
UpperCamelCase_ =self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase_ =self.convert_tokens_to_ids(UpperCamelCase_ )
UpperCamelCase_ =tgt_lang_id
return inputs
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ ={self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase_ =self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: str ):
UpperCamelCase_ ="".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase_ =os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
UpperCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str = "eng_Latn" , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "fra_Latn" , **UpperCamelCase_: Dict , ):
UpperCamelCase_ =src_lang
UpperCamelCase_ =tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase__ ( self: int ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase__ ( self: List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: Dict ):
UpperCamelCase_ =self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
UpperCamelCase_ =[]
UpperCamelCase_ =[self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase_ =[self.cur_lang_code]
UpperCamelCase_ =[self.eos_token_id]
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str ):
UpperCamelCase_ =self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCamelCase_ =[]
UpperCamelCase_ =[self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase_ =[self.cur_lang_code]
UpperCamelCase_ =[self.eos_token_id]
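# Usage sketch (an editorial addition; the class and checkpoint names are
# assumptions matching the NLLB release this file mirrors):
#
#     tok = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("Hello world", return_tensors="pt")
#     # In the non-legacy behaviour set by `set_src_lang_special_tokens`,
#     # input_ids are wrapped as [eng_Latn] ... tokens ... [</s>].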
| 391
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = "▁"
A_ = {"vocab_file": "sentencepiece.bpe.model"}
A_ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
A_ = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
A_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = VOCAB_FILES_NAMES
__lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__lowerCamelCase : List[int] = []
__lowerCamelCase : List[int] = []
def __init__( self: int , UpperCamelCase_: Dict , UpperCamelCase_: Any="<s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Tuple="</s>" , UpperCamelCase_: int="<s>" , UpperCamelCase_: Union[str, Any]="<unk>" , UpperCamelCase_: Union[str, Any]="<pad>" , UpperCamelCase_: int="<mask>" , UpperCamelCase_: str=None , UpperCamelCase_: Any=None , UpperCamelCase_: int=None , UpperCamelCase_: Optional[Dict[str, Any]] = None , UpperCamelCase_: str=None , UpperCamelCase_: Optional[Any]=False , **UpperCamelCase_: Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase_ =AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
UpperCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase_ =legacy_behaviour
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
UpperCamelCase_ =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase_ ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase_ =1
UpperCamelCase_ =len(self.sp_model )
UpperCamelCase_ ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ )
}
UpperCamelCase_ ={v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase_ =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase_ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase_ =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase_ =src_lang if src_lang is not None else "eng_Latn"
UpperCamelCase_ =self.lang_code_to_id[self._src_lang]
UpperCamelCase_ =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self: List[Any] ):
UpperCamelCase_ =self.__dict__.copy()
UpperCamelCase_ =None
UpperCamelCase_ =self.sp_model.serialized_model_proto()
return state
def __setstate__( self: int , UpperCamelCase_: List[str] ):
UpperCamelCase_ =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ ={}
UpperCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCamelCase__ ( self: List[Any] ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase__ ( self: Any ):
return self._src_lang
@src_lang.setter
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
UpperCamelCase_ =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
UpperCamelCase_ =[1] * len(self.prefix_tokens )
UpperCamelCase_ =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase_ )) + ([0] * len(UpperCamelCase_ )) + suffix_ones
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
UpperCamelCase_ =[self.sep_token_id]
UpperCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[str] , UpperCamelCase_: Optional[str] , **UpperCamelCase_: int ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCamelCase_ =src_lang
UpperCamelCase_ =self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase_ =self.convert_tokens_to_ids(UpperCamelCase_ )
UpperCamelCase_ =tgt_lang_id
return inputs
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ ={self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase_ =self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: str ):
UpperCamelCase_ ="".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase_ =os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
UpperCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str = "eng_Latn" , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "fra_Latn" , **UpperCamelCase_: Dict , ):
UpperCamelCase_ =src_lang
UpperCamelCase_ =tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase__ ( self: int ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase__ ( self: List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: Dict ):
UpperCamelCase_ =self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
UpperCamelCase_ =[]
UpperCamelCase_ =[self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase_ =[self.cur_lang_code]
UpperCamelCase_ =[self.eos_token_id]
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str ):
UpperCamelCase_ =self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCamelCase_ =[]
UpperCamelCase_ =[self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase_ =[self.cur_lang_code]
UpperCamelCase_ =[self.eos_token_id]
| 391
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
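# Usage sketch (an editorial addition): the config can be instantiated
# standalone, e.g. for a tiny debugging model; unspecified fields keep the
# defaults above:
#
#     config = CTRLConfig(n_layer=2, n_head=4, n_embd=128)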
| 710
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : Union[str, Any] = LDMTextToImagePipeline
__snake_case : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
__snake_case : str = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
__snake_case : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case : Optional[Any] = False
def __lowercase ( self :List[str] ):
torch.manual_seed(0 )
__lowerCamelCase : str =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__lowerCamelCase : str =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] =AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : Any =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Optional[int] =CLIPTextModel(__lowercase )
__lowerCamelCase : Dict =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase : Optional[int] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_text2img(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 363
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
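# Why the score above works: `loss` is the cross-entropy averaged over target tokens,
# so multiplying it by the number of labels recovers the summed (negative)
# log-likelihood of the target sequence. A hedged sketch of the same computation,
# with illustrative names only:
#
#   mean_nll = model(input_ids, labels=labels).loss          # per-token average
#   sequence_log_likelihood = -(labels.shape[-1] * mean_nll.item())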
| 27
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """Count the parameters that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Save the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class lowerCamelCase( pl.Callback ):
'''simple docstring'''
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
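# Usage sketch (hedged: `output_dir` is hypothetical; this only illustrates how the
# callback class above — named `lowerCamelCase` in this file — and the two factory
# functions are typically wired into a pytorch_lightning Trainer):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           lowerCamelCase(),
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )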
| 27
| 1
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__ ( TestCasePlus ):
'''simple docstring'''
@slow
@require_torch
    def test_finetune_bert2bert(self) -> None:
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
| 4
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__ ( Pipeline ):
    """simple docstring"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs) -> List[Any]:
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
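# Usage sketch (hedged: assumes a CLIP-style checkpoint; `pipeline` is the standard
# transformers factory and the file/label names below are illustrative only):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "remote control"])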
| 4
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowercase__ ( SequenceFeatureExtractor ):
    model_input_names = ["input_features"]
    def __init__(self, feature_size=80, sampling_rate=1_6000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8_000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, sampling_rate: Optional[int] = None, do_normalize: Optional[bool] = None, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value, )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
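# Minimal usage sketch (hedged: the class above mirrors transformers' Whisper feature
# extractor; `lowercase__` is simply the name it carries in this file, and the input
# is one second of synthetic silence at 16 kHz):
if __name__ == "__main__":
    feature_extractor = lowercase__()
    audio = np.zeros(16_000, dtype=np.float32)
    features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # expected (1, 80, 3000): 80 mel bins x 30 s of frames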
| 153
|
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder for the three given inputs."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
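# Reading the counts (hedged: qiskit prints classical bits little-endian, so each key
# is "cr[1]cr[0]" = "<carry_out><sum>"): for the inputs (1, 1, 1) all 1000 shots
# should land on "11", i.e. 1 + 1 + 1 = 3, which is sum 1 with carry-out 1.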
| 153
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version of one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc", "https://huggingface.co/docs/transformers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
__magic_name__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__magic_name__ : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 410
| 0
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE, model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad", )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
"""simple docstring"""
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 1_0000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 74
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244
| 0
|
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 292
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    """Return True if there is a path from source `s` to sink `t` in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant: repeatedly augment along shortest (BFS) paths."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
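# For the 6-node network above (the classic CLRS example graph) the printed maximum
# flow is 23. Note that ford_fulkerson mutates `graph` in place (it becomes the
# residual graph), so pass a copy if you need the original matrix afterwards:
#
#   import copy
#   ford_fulkerson(copy.deepcopy(graph), source, sink)  # leaves `graph` untouched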
| 292
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCamelCase ( PretrainedConfig ):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
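# Minimal usage sketch (hedged: `UpperCamelCase` is the config class defined above and
# mirrors transformers' CTRLConfig; the overrides are purely illustrative):
if __name__ == "__main__":
    config = UpperCamelCase(n_layer=2, n_head=4)
    print(config.model_type, config.n_embd)
    print(config.num_hidden_layers)  # resolved to n_layer via attribute_map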
| 635
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""", f"""block.{new_block_num}.{layer_num}.{new_name}""")
    return key
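# Worked example of the renaming above (hypothetical checkpoint key, for illustration):
#   replace_key_with_offset("network.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "network.block.1.0.output.conv1.weight"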
def rename_keys(state_dict):
    """Map the original PoolFormer state-dict keys onto the HuggingFace layout."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"""patch_embeddings.{total_embed_found}.""")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepare a test image of two cats from the COCO val set."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into our PoolFormer structure."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""")
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"""Converting model {model_name}...""")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"""Size {size} not supported""")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
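# Example invocation (hypothetical script and local paths, shown for illustration only):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf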
| 197
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    """simple docstring"""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.loader )
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the item located at `_loader_batch_index` within the current `_loader_batch_data`."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__( self ):
        '''simple docstring'''
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
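# Illustrative sketch (added; the `_sketch_*` helper is hypothetical): the
# unrolling above slices a batched tensor into items that keep a leading
# batch_size=1 dimension via `unsqueeze(0)`.
def _sketch_unroll_batch():
    batch = torch.arange(6 ).reshape(3 , 2 )  # a loader batch of size 3
    items = [batch[i].unsqueeze(0 ) for i in range(batch.shape[0] )]
    assert all(item.shape == (1, 2) for item in items )
    return items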
class PipelineChunkIterator( A ):
"""simple docstring"""
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        '''simple docstring'''
        super().__init__(loader , infer , params )
def __iter__( self : Dict ):
'''simple docstring'''
        self.iterator = iter(self.loader )
        self.subiterator = None
return self
    def __next__( self ):
        '''simple docstring'''
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
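# Illustrative sketch (added; hypothetical helper): the chunk iterator above is
# equivalent to lazily flattening generators of generators, one sub-generator
# per input item.
def _sketch_flatten_chunks():
    def infer(x ):
        yield from (x , x * 10 )
    flattened = [y for x in (1 , 2 , 3 ) for y in infer(x )]
    assert flattened == [1, 10, 2, 20, 3, 30]
    return flattened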
class PipelinePackIterator( A ):
"""simple docstring"""
def __iter__( self : Dict ):
'''simple docstring'''
        self.iterator = iter(self.loader )
return self
    def __next__( self ):
        '''simple docstring'''
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
        return accumulator
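# Illustrative sketch (added; hypothetical helper): the pack iterator above
# accumulates streamed items until one carries `is_last=True`, yielding one
# list per original input.
def _sketch_pack_until_last(stream ):
    accumulator = []
    for item in stream:
        is_last = item.pop("is_last" )
        accumulator.append(item )
        if is_last:
            break
    return accumulator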
class KeyDataset( A ):
"""simple docstring"""
    def __init__( self , dataset: Dataset , key: str ):
        '''simple docstring'''
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self , i ):
        '''simple docstring'''
        return self.dataset[i][self.key]
class KeyPairDataset( A ):
"""simple docstring"""
    def __init__( self , dataset: Dataset , key1: str , key2: str ):
        '''simple docstring'''
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self , i ):
        '''simple docstring'''
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 299
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """bloom"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_hidden_layers""": """n_layer""",
        """num_attention_heads""": """n_head""",
    }
    def __init__( self , vocab_size=250_880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("""n_embed""" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
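# Illustrative sketch (added; the `_sketch_*` helper is hypothetical): `n_embed`
# is accepted as a legacy alias for `hidden_size` by the __init__ above.
def _sketch_bloom_config():
    config = BloomConfig(n_embed=128 )
    assert config.hidden_size == 128
    return config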
class BloomOnnxConfig( OnnxConfigWithPast ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.12""" )
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" , inverted_values_shape=True )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-3
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : "PreTrainedTokenizer" , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional["TensorType"] = None , ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = super(UpperCamelCase , self ).generate_dummy_inputs(
UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase )
# We need to order the input in the way they appears in the forward()
__UpperCAmelCase : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__UpperCAmelCase ,__UpperCAmelCase : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__UpperCAmelCase : Union[str, Any] = seqlen + 2
__UpperCAmelCase : int = self._config.hidden_size // self.num_attention_heads
__UpperCAmelCase : Optional[Any] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__UpperCAmelCase : Optional[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__UpperCAmelCase : str = [
(torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(self.num_layers )
]
__UpperCAmelCase : Union[str, Any] = common_inputs["""attention_mask"""]
if self.use_past:
__UpperCAmelCase : List[str] = ordered_inputs["""attention_mask"""].dtype
__UpperCAmelCase : List[str] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 )
return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
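# Illustrative sketch (added; hypothetical helper): BLOOM past_key_values use a
# fused (batch * n_head) leading dimension, with keys laid out transposed
# relative to values, matching the shapes built in generate_dummy_inputs above.
def _sketch_bloom_past_shapes(batch=2 , n_head=8 , head_dim=8 , past_len=5 ):
    key_shape = (batch * n_head, head_dim, past_len)
    value_shape = (batch * n_head, past_len, head_dim)
    return key_shape, value_shape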
| 299
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_flax_xlm_roberta_base( self ):
        """simple docstring"""
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['''last_hidden_state''']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 103
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs
    def test_text_to_video_default_case( self ):
        """simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""output_type"""] = """np"""
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False , expected_max_diff=3e-3 )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=1e-2 )
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt( self ):
        """simple docstring"""
        pass
    def test_progress_bar( self ):
        """simple docstring"""
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests( unittest.TestCase ):
    def test_full_model( self ):
        """simple docstring"""
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type="""pt""" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model( self ):
        """simple docstring"""
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        pipe = pipe.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type="""pt""" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 423
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
    parser.add_argument(
        '--validation_file' , type=str , default=None , help='A csv or a json file containing the validation data.')
    parser.add_argument(
        '--max_length' , type=int , default=5 , help='The maximum total input sequence length after tokenization.' , )
    parser.add_argument(
        '--num_beams' , type=int , default=None , help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ) , )
    parser.add_argument(
        '--model_name_or_path' , type=str , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=True , )
    parser.add_argument(
        '--config_name' , type=str , default=None , help='Pretrained config name or path if not the same as model_name' , )
    parser.add_argument(
        '--device' , type=str , default='cpu' , help='Device where the model will be run' , )
    parser.add_argument('--output_file_path' , type=str , default=None , help='Where to store the final ONNX file.')
    args = parser.parse_args()
    return args
def _snake_case (__lowercase , __lowercase="cpu"):
UpperCamelCase_ = model_dict[model_name].from_pretrained(__lowerCAmelCase).to(__lowerCAmelCase)
UpperCamelCase_ = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase)
if model_name in ["facebook/bart-base"]:
UpperCamelCase_ = 0
UpperCamelCase_ = None
UpperCamelCase_ = 0
return huggingface_model, tokenizer
def export_and_validate_model(model , tokenizer , onnx_file_path , num_beams , max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='pt').to(model.device)
        summary_ids = model.generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            } , example_outputs=summary_ids , )
        logger.info('Model exported to {}'.format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None , {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3)
        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'
    logger.info('Exporting model to ONNX')
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length)
if __name__ == "__main__":
main()
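# Illustrative usage sketch (added; the script filename is hypothetical, the
# flags match the argparse definitions above):
#
#     python run_bart_onnx_export.py --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 --output_file_path bart.onnx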
| 712
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
"""simple docstring"""
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    def __post_init__( self ):
        self.task_name = self.task_name.lower()
class Split( Enum ):
    """simple docstring"""
    train = """train"""
    dev = """dev"""
    test = """test"""
class GlueDataset( Dataset ):
    """simple docstring"""
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ):
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__( self ):
        return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels( self ):
        return self.label_list
| 618
| 0
|
def solution() -> str:
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
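# Illustrative sketch (added; hypothetical helper): the same last ten digits
# can be computed with bounded integers via modular exponentiation.
def solution_mod() -> str:
    mod = 10**10
    total = sum(pow(i , i , mod ) for i in range(1 , 1001 ) ) % mod
    return str(total ).zfill(10 )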
if __name__ == "__main__":
print(solution())
| 33
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2 , 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
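# Illustrative sketch (added; hypothetical helper): sanity check of the first
# few counts produced by the recurrence above.
def _sketch_small_cases() -> list[int]:
    return [solution(n) for n in range(6)]  # expected: [1, 1, 2, 4, 8, 15]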
if __name__ == "__main__":
print(f"""{solution() = }""")
| 320
| 0
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__( self , vertices: set[int] , edges: Mapping[EdgeT, int] ) -> None:
        '''simple docstring'''
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge: EdgeT , weight: int ) -> None:
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ) -> "Graph":
        '''simple docstring'''
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def snake_case_ ( snake_case = "p107_network.txt" ) -> int:
lowercase__: Tuple = os.path.abspath(os.path.dirname(_UpperCamelCase ) )
lowercase__: List[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase__: Union[str, Any] = {}
lowercase__: int = 42
lowercase__: str = 42
lowercase__: Any = 42
with open(_UpperCamelCase ) as f:
lowercase__: List[str] = f.read().strip().split('\n' )
lowercase__: List[str] = [line.split(',' ) for line in data]
for edgea in range(1 , len(_UpperCamelCase ) ):
for edgea in range(_UpperCamelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
lowercase__: Optional[int] = int(adjaceny_matrix[edgea][edgea] )
lowercase__: Union[str, Any] = Graph(set(range(len(_UpperCamelCase ) ) ) , _UpperCamelCase )
lowercase__: Optional[Any] = graph.prims_algorithm()
lowercase__: Optional[int] = sum(graph.edges.values() )
lowercase__: str = sum(subgraph.edges.values() )
return initial_total - optimal_total
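# Illustrative sketch (added; hypothetical helper): Prim's algorithm on a tiny
# triangle graph -- the minimum spanning tree keeps the two lightest edges.
def _sketch_prims_triangle() -> int:
    graph = Graph({0, 1, 2} , {(0, 1): 1, (1, 2): 2, (0, 2): 3} )
    mst = graph.prims_algorithm()
    return sum(mst.edges.values() )  # 1 + 2 == 3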
if __name__ == "__main__":
print(F'''{solution() = }''')
| 716
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_dict_tuple_outputs_equivalent( self ):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_save_load_local( self ):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_cifar10( self ):
        '''simple docstring'''
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_ema_bedroom( self ):
        '''simple docstring'''
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 335
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 278
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium(forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1 ) -> bool:
    '''simple docstring'''
    # the sum of moments about the origin must be (close to) zero
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 530
| 0
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
"""simple docstring"""
    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["""content"""] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )["""input_ids"""]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
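# Illustrative sketch (added; hypothetical helper): packing variable-length
# token lists into fixed seq_length blocks, as the dataset above does.
def _sketch_pack(token_lists , seq_length , sep ):
    all_token_ids = []
    for ids in token_lists:
        all_token_ids.extend(ids + [sep] )
    blocks = [
        all_token_ids[i : i + seq_length] for i in range(0 , len(all_token_ids ) , seq_length )
    ]
    return [block for block in blocks if len(block ) == seq_length]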
def create_dataloader(args ):
    """simple docstring"""
    ds_kwargs = {"""streaming""": True}
    valid_data = load_dataset(args.dataset_name , split="""train""" , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate(args ):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float("""inf""" )
    return loss.item(), perplexity.item()
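# Illustrative sketch (added): perplexity is the exponential of the mean token
# loss, e.g. a mean loss of 2.0 gives a perplexity of about 7.39.
#
#     import math
#     assert abs(math.exp(2.0) - 7.389) < 1e-2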
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
| 385
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 385
| 1
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
A__ : Tuple = {"""UserAgent""": UserAgent().random}
def extract_user_profile(script ) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__( self , username ):
        """simple docstring"""
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ) -> dict:
        """simple docstring"""
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , '''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ):
        """simple docstring"""
        return f'''{self.__class__.__name__}(\'{self.username}\')'''
    def __str__( self ):
        """simple docstring"""
        return f'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ) -> str:
        """simple docstring"""
        return self.user_data["username"]
    @property
    def fullname( self ) -> str:
        """simple docstring"""
        return self.user_data["full_name"]
    @property
    def biography( self ) -> str:
        """simple docstring"""
        return self.user_data["biography"]
    @property
    def email( self ) -> str:
        """simple docstring"""
        return self.user_data["business_email"]
    @property
    def website( self ) -> str:
        """simple docstring"""
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ) -> int:
        """simple docstring"""
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ) -> int:
        """simple docstring"""
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ) -> int:
        """simple docstring"""
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ) -> str:
        """simple docstring"""
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ) -> bool:
        """simple docstring"""
        return self.user_data["is_verified"]
    @property
    def is_private( self ) -> bool:
        """simple docstring"""
        return self.user_data["is_private"]
def _a ( __UpperCamelCase : str = "github" ):
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
lowerCAmelCase__ : Any = InstagramUser(__UpperCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,__UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("""github""")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 233
|
from math import isclose, sqrt
def next_point(point_x: float , point_y: float , incoming_gradient: float ) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal line at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s_2 = sin(2 * theta), c_2 = cos(2 * theta) for the reflection rotation
    s_2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c_2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s_2 - c_2 * incoming_gradient) / (c_2 + s_2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 233
| 1
|
def kinetic_energy(mass: float , velocity: float ) -> float:
    """simple docstring"""
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
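# Illustrative usage sketch (added): KE = 0.5 * m * v**2; speed enters squared,
# so the sign of the velocity does not matter.
#
#     assert kinetic_energy(10, 5) == 125.0
#     assert kinetic_energy(10, -5) == 125.0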
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 102
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 102
| 1
|
'''simple docstring'''
def exchange_sort(numbers: list[int] ) -> list[int]:
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
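# Illustrative usage sketch (added): exchange sort performs O(n^2) comparisons
# and sorts the list in place.
#
#     assert exchange_sort([3, 1, 2]) == [1, 2, 3]
#     assert exchange_sort([]) == []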
| 316
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    # Used in the return key of the pipeline.
    return_name = "generated"
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        """simple docstring"""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
def a__( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int )-> Optional[Any]:
"""simple docstring"""
return True
def a__( self : Optional[int] , *lowerCAmelCase : Tuple , lowerCAmelCase : int )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , lowerCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
UpperCAmelCase = ([prefix + arg for arg in args[0]],)
UpperCAmelCase = True
elif isinstance(args[0] , lowerCAmelCase ):
UpperCAmelCase = (prefix + args[0],)
UpperCAmelCase = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
UpperCAmelCase = self.tokenizer(*lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = super().__call__(*lowerCAmelCase , **lowerCAmelCase )
if (
isinstance(args[0] , lowerCAmelCase )
and all(isinstance(lowerCAmelCase , lowerCAmelCase ) for el in args[0] )
and all(len(lowerCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def a__( self : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=TruncationStrategy.DO_NOT_TRUNCATE , **lowerCAmelCase : List[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self._parse_and_tokenize(lowerCAmelCase , truncation=lowerCAmelCase , **lowerCAmelCase )
return inputs
def a__( self : Optional[int] , lowerCAmelCase : str , **lowerCAmelCase : Dict )-> str:
"""simple docstring"""
if self.framework == "pt":
UpperCAmelCase , UpperCAmelCase = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
UpperCAmelCase , UpperCAmelCase = tf.shape(model_inputs['''input_ids'''] ).numpy()
UpperCAmelCase = generate_kwargs.get('''min_length''' , self.model.config.min_length )
UpperCAmelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(lowerCAmelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
UpperCAmelCase = self.model.generate(**lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
UpperCAmelCase = output_ids.reshape(lowerCAmelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase = tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str=ReturnType.TEXT , lowerCAmelCase : Tuple=False )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase = {
F"""{self.return_name}_text""": self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
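# How the three dicts from _sanitize_parameters are consumed, simplified from
# Pipeline.run_single in pipelines/base.py (shown here only for orientation,
# not a verbatim copy of that method):
#
#     model_inputs  = self.preprocess(inputs, **preprocess_params)
#     model_outputs = self.forward(model_inputs, **forward_params)
#     outputs       = self.postprocess(model_outputs, **postprocess_params)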
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """Pipeline for summarization; returns results under the "summary" key."""

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """Pipeline for translation; returns results under the "translation" key."""

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility; direct argument use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to, YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
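A quick usage sketch for the three pipelines above. "t5-small" is only an example checkpoint (any seq2seq model from the Hub should work), and outputs will vary by model:

from transformers import pipeline

generator = pipeline("text2text-generation", model="t5-small")
print(generator("translate English to German: The house is wonderful."))

summarizer = pipeline("summarization", model="t5-small")
print(summarizer("A very long article ...", min_length=2, max_length=20))

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?"))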
| 210
| 0
|
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # one chunk of "data", then end-of-file
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
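For context, a minimal sketch of the send_file server this test exercises, reconstructed from the mocked interactions above; the real file_transfer/send_file.py may differ in details (port number, logging, an accept loop):

import socket


def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
    # `testing` is kept for signature compatibility; a real server might use it
    # to break out of an accept loop after one client.
    port = 12312  # arbitrary port choice for this sketch
    sock = socket.socket()
    host = socket.gethostname()
    sock.bind((host, port))
    sock.listen(5)

    conn, _addr = sock.accept()
    conn.recv(1024)  # client request; contents unused here

    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)

    conn.close()
    sock.shutdown(1)
    sock.close()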
| 297
|
"""
Evaluate an expression in Reverse Polish (postfix) notation with a stack.
https://en.wikipedia.org/wiki/Reverse_Polish_notation
Valid operators are +, -, *, /; each operand is an integer.
"""

from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
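The else branch of the division case makes "/" truncate toward zero (C-style) instead of using Python's floor division: when the operands have opposite signs and do not divide evenly, a // b floors away from zero, so the code adds 1 back. A quick check, consistent with the function above:

print(evaluate_postfix(["-7", "2", "/"]))  # -3: floor gives -4, the +1 corrects toward zero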
| 297
| 1
|