| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept in the serialized dict even when it has the default value
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
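# Usage sketch (not part of the original file): `Dataset.prepare_for_task` was
# the public entry point for task templates in `datasets` at the time this file
# was written; the dataset name below is a placeholder.
#
# from datasets import load_dataset
#
# squad = load_dataset("squad", split="train")
# qa = squad.prepare_for_task("question-answering-extractive")
# # Columns are now renamed/cast to: "question", "context", "answers"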
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def snake_case_ ( A_ : Callable[[int | float], int | float], A_ : int | float, A_ : int | float, A_ : int = 1_00, ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = x_start
_lowerCamelCase : int = fnc(A_ )
_lowerCamelCase : Optional[Any] = 0.0
for _ in range(A_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
_lowerCamelCase : int = (x_end - x_start) / steps + xa
_lowerCamelCase : Tuple = fnc(A_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
_lowerCamelCase : Optional[Any] = xa
_lowerCamelCase : Any = fxa
return area
if __name__ == "__main__":
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase__ = 10
while i <= 100000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case : Tuple = None
snake_case : Tuple = logging.get_logger(__name__)
snake_case : List[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case : Dict = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
snake_case : int = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
snake_case : List[str] = """▁"""
class UpperCamelCase__ ( a_):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = BigBirdTokenizer
__UpperCAmelCase = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase = []
def __init__( self : Optional[int] , UpperCamelCase_ : int=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Tuple="<s>" , UpperCamelCase_ : int="</s>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Optional[Any]="[SEP]" , UpperCamelCase_ : int="[MASK]" , UpperCamelCase_ : Tuple="[CLS]" , **UpperCamelCase_ : Optional[Any] , ):
'''simple docstring'''
__magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
__magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
__magic_name__ = vocab_file
__magic_name__ = False if not self.vocab_file else True
def a__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def a__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__magic_name__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
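# Usage sketch (not part of the original file; downloads the real checkpoint
# from the Hub, so it needs network access):
#
# tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
# encoding = tokenizer("Block sparse attention handles long documents.")
# print(encoding.input_ids[:5])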
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
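# Quick sanity checks for is_balanced (added for illustration; not in the
# original script):
assert is_balanced("{[()]}")
assert not is_balanced("{[(])}")
assert is_balanced("")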
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
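# Example invocation (the script name and checkpoint paths below are
# placeholders, not taken from the original file):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus/model.ckpt \
#       --save_dir ./bigbird-pegasus-pytorch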
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Gets a set of train, valid, and test dataloaders for a particular fold.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])
        self.play(Write(step_6))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_6))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_7 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])
        self.play(Write(step_7, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_7),
            FadeOut(a_c, run_time=0.5),
        )

        step_8 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_8.move_to([2, 2, 0])

        self.play(Write(step_8, run_time=3), MoveToTarget(input))

        self.wait()
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample: jnp.ndarray,
        timesteps: Union[jnp.ndarray, float, int],
        encoder_hidden_states: jnp.ndarray,
        controlnet_cond: jnp.ndarray,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
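# Example invocation (the script name and all paths below are placeholders):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin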
from typing import Any
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
_validation(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# Creates data structures and fill initial step
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for state in states_space:
SCREAMING_SNAKE_CASE = observations_space[0]
SCREAMING_SNAKE_CASE = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
SCREAMING_SNAKE_CASE = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(_UpperCAmelCase)):
SCREAMING_SNAKE_CASE = observations_space[o]
SCREAMING_SNAKE_CASE = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = -1
for k_state in states_space:
SCREAMING_SNAKE_CASE = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
SCREAMING_SNAKE_CASE = probability
SCREAMING_SNAKE_CASE = k_state
# Update probabilities and pointers dicts
SCREAMING_SNAKE_CASE = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
SCREAMING_SNAKE_CASE = arg_max
# The final observation
SCREAMING_SNAKE_CASE = observations_space[len(_UpperCAmelCase) - 1]
# argmax for given final observation
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = -1
for k_state in states_space:
SCREAMING_SNAKE_CASE = probabilities[(k_state, final_observation)]
if probability > max_probability:
SCREAMING_SNAKE_CASE = probability
SCREAMING_SNAKE_CASE = k_state
SCREAMING_SNAKE_CASE = arg_max
# Process pointers backwards
SCREAMING_SNAKE_CASE = last_state
SCREAMING_SNAKE_CASE = []
for o in range(len(_UpperCAmelCase) - 1 , -1 , -1):
result.append(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
_validate_not_empty(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
_validate_lists(_UpperCAmelCase , _UpperCAmelCase)
_validate_dicts(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError('There\'s an empty parameter')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
_validate_list(_UpperCAmelCase , 'observations_space')
_validate_list(_UpperCAmelCase , 'states_space')
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
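    # Illustrative usage (added for this edit; values are the classic
    # "healthy/fever" HMM example, not part of the original module):
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "fever"]
    start_p = {"healthy": 0.6, "fever": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "fever": 0.3},
        "fever": {"healthy": 0.4, "fever": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # -> ['healthy', 'healthy', 'fever']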
| 73
|
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    # Constant learning rate: the multiplier is always 1.
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    # Linear warmup to the base learning rate, then constant.
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    # `step_rules` looks like "1:10,0.1:20,0.01": multiplier 1 until step 10,
    # 0.1 until step 20, then 0.01 for the remaining steps.
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
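if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module):
    # attach a warmup + cosine-decay schedule to a throwaway optimizer.
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
    for _ in range(5):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())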
| 520
| 0
|
"""simple docstring"""
def count_set_bits(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
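    # Quick sanity checks (illustrative): 13 == 0b1101 has three set bits.
    assert count_set_bits(13) == 3
    assert count_set_bits(0) == 0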
| 533
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
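# Example invocation (illustrative; script name and all paths are placeholders):
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./converted-model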
| 533
| 1
|
"""simple docstring"""
from collections import Counter
from timeit import timeit
def __UpperCAmelCase ( __lowerCamelCase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def __UpperCAmelCase ( __lowerCamelCase = "" ) -> bool:
if len(__lowerCamelCase ) == 0:
return True
lowercase__ : Dict = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowercase__ : dict[str, int] = {}
for character in lower_case_input_str:
lowercase__ : Any = character_freq_dict.get(__lowerCamelCase , 0 ) + 1
lowercase__ : Optional[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __UpperCAmelCase ( __lowerCamelCase = "" ) -> None:
print('''\nFor string = ''' , __lowerCamelCase , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__lowerCamelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__lowerCamelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 560
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
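    # Illustrative check (not in the original): root holds 3 coins and both
    # children hold 0, so exactly two moves are needed (one coin per child).
    assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2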
| 560
| 1
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f"{solution() = }")
| 622
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 622
| 1
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
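# Usage sketch (illustrative; run from the repository root):
#   python utils/sort_auto_mappings.py               # sort the mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only report unsorted files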
| 24
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 70
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 22
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 22
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Folder paths are intentionally left empty; fill them in before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char=32):
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 141
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
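# Illustrative sketch (not from the original file): the axis mapping that the
# `inputs` property above yields for a standard, non multiple-choice task.
if __name__ == "__main__":
    from collections import OrderedDict

    dynamic_axis = {0: "batch", 1: "sequence"}
    print(OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]))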
| 1
| 0
|
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__UpperCAmelCase = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
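    # Quick cross-check (added for illustration): all three implementations
    # above must agree with the hand-written `expect` list.
    assert next_greatest_element_slow(arr) == expect
    assert next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect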
| 98
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__UpperCAmelCase = '''aab'''
__UpperCAmelCase = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 98
| 1
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '''.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 147
|
from manim import *
class Stage5(Scene):
    def construct(self):
A__ = Rectangle(height=0.5 , width=0.5)
A__ = Rectangle(height=0.2_5 , width=0.2_5)
A__ = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
A__ = [mem.copy() for i in range(6)]
A__ = [mem.copy() for i in range(6)]
A__ = VGroup(*a__).arrange(a__ , buff=0)
A__ = VGroup(*a__).arrange(a__ , buff=0)
A__ = VGroup(a__ , a__).arrange(a__ , buff=0)
A__ = Text('''CPU''' , font_size=2_4)
A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
cpu.move_to([-2.5, -0.5, 0])
self.add(a__)
A__ = [mem.copy() for i in range(4)]
A__ = VGroup(*a__).arrange(a__ , buff=0)
A__ = Text('''GPU''' , font_size=2_4)
A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
gpu.move_to([-1, -1, 0])
self.add(a__)
A__ = [mem.copy() for i in range(6)]
A__ = VGroup(*a__).arrange(a__ , buff=0)
A__ = Text('''Model''' , font_size=2_4)
A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
model.move_to([3, -1.0, 0])
self.add(a__)
A__ = []
A__ = []
A__ = []
for i, rect in enumerate(a__):
rect.set_stroke(a__)
A__ = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3).set_stroke(width=0.0).set_fill(a__ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=a__)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0)
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0)
self.add(a__)
model_cpu_arr.append(a__)
self.add(*a__ , *a__ , *a__)
A__ = [mem.copy() for i in range(6)]
A__ = VGroup(*a__).arrange(a__ , buff=0)
A__ = Text('''Loaded Checkpoint''' , font_size=2_4)
A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
checkpoint.move_to([3, 0.5, 0])
self.add(a__)
A__ = []
A__ = []
for i, rect in enumerate(a__):
A__ = fill.copy().set_fill(a__ , opacity=0.7)
target.move_to(a__)
ckpt_arr.append(a__)
A__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.move_to(cpu_right_col_base[i - 5])
ckpt_cpu_arr.append(a__)
self.add(*a__ , *a__)
A__ = Square(side_length=2.2)
key.move_to([-5, 2, 0])
A__ = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0])
self.add(a__ , a__)
A__ = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left())
self.add(a__)
A__ = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=2_4 , )
step_a.move_to([2, 2, 0])
A__ = [meta_mem.copy() for i in range(6)]
A__ = [meta_mem.copy() for i in range(6)]
A__ = VGroup(*a__).arrange(a__ , buff=0)
A__ = VGroup(*a__).arrange(a__ , buff=0)
A__ = VGroup(a__ , a__).arrange(a__ , buff=0)
A__ = Text('''Disk''' , font_size=2_4)
A__ = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
disk.move_to([-4.0, -1.2_5, 0])
self.play(Write(a__ , run_time=3) , Write(a__ , run_time=1) , Create(a__ , run_time=1))
A__ = []
for i, rect in enumerate(a__):
A__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i]).scale(0.5)
animations.append(MoveToTarget(a__ , run_time=1.5))
self.play(*a__)
self.play(FadeOut(a__))
A__ = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4)
step_a.move_to([2, 2, 0])
self.play(Write(a__ , run_time=3))
self.play(
FadeOut(a__ , a__ , *a__ , *a__) , )
self.wait()
| 632
| 0
|
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
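# The same trick unrolled (added for illustration): %r re-quotes the template
# string, so formatting the template with itself reproduces the one-liner above.
template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)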
| 421
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
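# Hedged usage sketch (illustrative, not from the original file): CANINE needs
# no vocab file since token ids are simply Unicode codepoints.
if __name__ == "__main__":
    tok = CanineTokenizer()
    ids = tok.build_inputs_with_special_tokens([ord(c) for c in "hi"])
    print(ids)  # [57344, 104, 105, 57345] i.e. [CLS] 'h' 'i' [SEP]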
| 421
| 1
|
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
A : List[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
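    # Shell usage sketch (paths are hypothetical; flag names come from the
    # parser above):
    #   python convert_gpt2_checkpoint_to_pytorch.py \
    #       --gpt2_checkpoint_path ./model.ckpt --pytorch_dump_folder_path ./gpt2-pt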
| 15
|
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
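    # Illustrative shape of one record the loop above consumes (only the fields
    # actually read are shown; values are made up):
    #   {"question": "who sings does he love me with reba",
    #    "positive_ctxs": [{"title": "Does He Love You"}, ...]}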
| 167
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()

        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 353
|
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 353
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowercase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
_lowercase : Union[str, Any] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 305
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None) -> None:
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other) -> bool:
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
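# Hedged usage sketch for the classes above (the model id is an assumption; any
# conversational checkpoint works the same way):
if __name__ == "__main__":
    from transformers import pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
    conv = Conversation("What's a good way to learn Python?")
    conv = chatbot(conv)
    print(conv.generated_responses[-1])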
| 305
| 1
|
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 101
|
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float, ) -> float:
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''')
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''')
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
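    # Worked example (values are illustrative, in cm^-3): for Nd = Na = 1e17
    # and ni = 1.5e10 at T = 300 K, V_bi = kT/q * ln(Nd*Na/ni^2) ≈ 0.81 V.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))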
| 101
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 65
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
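# Hedged usage sketch (model id taken from the vocab map above; requires the
# actual checkpoint to be downloadable):
if __name__ == "__main__":
    tok = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
    enc = tok("UN Chief Says There Is No Military Solution in Syria")
    # ids begin with the en_XX language-code id and end with </s>, exactly as
    # set_src_lang_special_tokens arranges above.
    print(enc["input_ids"])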
| 128
| 0
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w") as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
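# Programmatic usage sketch (prompt and path are hypothetical; equivalent to
# the CLI entry point below):
#
#   retrieve("photo of a cat", "./real_reg/cat", num_class_images=200)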
if __name__ == "__main__":
_lowercase : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 715
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
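# A minimal usage sketch for the ONNX config above (hedged: the checkpoint name is
# illustrative, and `transformers` plus `torch` must be installed for dummy-input
# generation with `use_past=True`):
#
#   from transformers import AutoConfig, AutoTokenizer
#
#   config = AutoConfig.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(config, use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
#   print(list(dummy_inputs))  # ["input_ids", "past_key_values", "attention_mask"]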
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
SCREAMING_SNAKE_CASE__ : str =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
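# Example invocation (paths are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers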
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # 1 starts the chain that ends at 1
CHAINS[57] = False  # 58 starts the chain that ends at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution() = }')
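# Worked example: starting at 44 the chain of squared-digit sums is
#   44 -> 4**2 + 4**2 = 32 -> 3**2 + 2**2 = 13 -> 1 + 9 = 10 -> 1,
# so chain(44) is True (it ends at 1) and 44 is not counted by solution().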
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =field(
metadata={'''help''': '''The output directory where the model will be written.'''} ,)
a__ =field(
metadata={
'''help''': (
'''The encoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train an encoder model from scratch.'''
)
} ,)
a__ =field(
metadata={
'''help''': (
'''The decoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train a decoder model from scratch.'''
)
} ,)
a__ =field(
default=_lowercase ,metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
a__ =field(
default=_lowercase ,metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def lowerCamelCase_ ():
_UpperCAmelCase : str = HfArgumentParser((ModelArguments,) )
((_UpperCAmelCase ) , ) : Union[str, Any] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_UpperCAmelCase : int = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Dict = True
_UpperCAmelCase : Optional[Any] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=snake_case__ , decoder_config=snake_case__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_UpperCAmelCase : Optional[int] = decoder_config.decoder_start_token_id
_UpperCAmelCase : Dict = decoder_config.pad_token_id
if decoder_start_token_id is None:
_UpperCAmelCase : Dict = decoder_config.bos_token_id
if pad_token_id is None:
_UpperCAmelCase : Optional[int] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_UpperCAmelCase : Dict = decoder_config.eos_token_id
_UpperCAmelCase : List[Any] = decoder_start_token_id
_UpperCAmelCase : int = pad_token_id
_UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_UpperCAmelCase : str = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
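# Example invocation (model names are illustrative; a ViT encoder paired with a
# GPT-2 decoder is the usual image-captioning setup):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2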
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming,
            num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
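# A minimal usage sketch (hedged: `Dataset.from_generator` is the public entry
# point in recent `datasets` releases and drives this input stream internally):
#
#   from datasets import Dataset
#
#   def squares():
#       for i in range(3):
#           yield {"n": i, "n_squared": i ** 2}
#
#   ds = Dataset.from_generator(squares)
#   print(ds[0])  # {"n": 0, "n_squared": 0}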
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AlbertTokenizer
__SCREAMING_SNAKE_CASE = AlbertTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = AlbertTokenizer(_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self , _snake_case ) -> Any:
"""simple docstring"""
UpperCAmelCase = '''this is a test'''
UpperCAmelCase = '''this is a test'''
return input_text, output_text
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(_snake_case ) , 3_0000 )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def snake_case_ ( self ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(_snake_case )
UpperCAmelCase = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
UpperCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(_snake_case )
UpperCAmelCase = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = AlbertTokenizer(_snake_case , keep_accents=_snake_case )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [48, 25, 21, 1289] )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(_snake_case , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = AlbertTokenizer(_snake_case )
UpperCAmelCase = tokenizer.encode('''sequence builders''' )
UpperCAmelCase = tokenizer.encode('''multi-sequence build''' )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def snake_case_ ( self ) -> Any:
"""simple docstring"""
# fmt: off
UpperCAmelCase = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCAmelCase  # alias for the large literal kept verbatim above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
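# Worked example (values rounded): borrowing 25,000 at 12% p.a. over 3 years
# gives a monthly rate of 0.01 and 36 payments, so
# equated_monthly_installments(25000, 0.12, 3) is roughly 830.36.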
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
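# A minimal usage sketch (hedged: constructing the processor with its defaults;
# loading via `.from_pretrained("openai/clip-vit-base-patch32")` also works):
#
#   import numpy as np
#   from PIL import Image
#
#   processor = CLIPImageProcessor()
#   image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)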
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
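# Round-trip sketch using the public `datasets` API that these readers and
# writers back (paths are placeholders):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ds.to_parquet("out.parquet")
#   reloaded = Dataset.from_parquet("out.parquet")
#   assert reloaded.column_names == ["col_1", "col_2"]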
"""simple docstring"""
import os
def _lowerCamelCase ( lowerCamelCase__ : str = "matrix.txt" ):
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as in_file:
lowercase__ : Optional[Any] = in_file.read()
lowercase__ : Tuple = [[int(lowerCamelCase__ ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
lowercase__ : List[str] = [[0 for cell in row] for row in grid]
lowercase__ : Dict = len(grid[0] )
lowercase__ : List[Any] = [[0 for i in range(lowerCamelCase__ )] for j in range(lowerCamelCase__ )]
lowercase__ : Any = grid[0][0]
for i in range(1 , lowerCamelCase__ ):
lowercase__ : Union[str, Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCamelCase__ ):
lowercase__ : List[Any] = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowerCamelCase__ ):
for j in range(1 , lowerCamelCase__ ):
lowercase__ : Dict = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F"{solution() = }")
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
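# Example: harmonic_series("4") returns ['1', '1/2', '1/3', '1/4'].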
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
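# The assertion accepts either orientation of an edge because Prim's algorithm
# may report (u, v) or (v, u) depending on which endpoint it reached first.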
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class a_ ( unittest.TestCase ):
@classmethod
def UpperCamelCase ( cls : Tuple ) -> Optional[Any]:
snake_case: List[str] =TOKEN
HfFolder.save_token(a_ )
@classmethod
def UpperCamelCase ( cls : List[Any] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def UpperCamelCase ( self : List[Any] ) -> List[str]:
snake_case: Any =BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
snake_case: Dict =FlaxBertModel(a_ )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
snake_case: List[str] =FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
snake_case: int =flatten_dict(unfreeze(model.params ) )
snake_case: Any =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case: Optional[Any] =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ , repo_id='test-model-flax' , push_to_hub=a_ , use_auth_token=self._token )
snake_case: Dict =FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
snake_case: str =flatten_dict(unfreeze(model.params ) )
snake_case: Union[str, Any] =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case: Union[str, Any] =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1E-3 , msg=F'''{key} not identical''' )
def UpperCamelCase ( self : List[Any] ) -> int:
snake_case: str =BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
snake_case: Optional[Any] =FlaxBertModel(a_ )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
snake_case: Optional[int] =FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
snake_case: str =flatten_dict(unfreeze(model.params ) )
snake_case: Optional[Any] =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case: Optional[Any] =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
a_ , repo_id='valid_org/test-model-flax-org' , push_to_hub=a_ , use_auth_token=self._token )
snake_case: str =FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
snake_case: List[str] =flatten_dict(unfreeze(model.params ) )
snake_case: Any =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case: Union[str, Any] =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal(model_1, model_2) -> bool:
    """Return True if every flattened parameter of the two models matches within tolerance."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
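# Illustrative usage of the helper above (added example, kept as comments since it
# needs a Flax install; passing the same `seed` pins the random init so the two
# freshly built models share parameters):
# config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
# assert check_models_equal(FlaxBertModel(config, seed=0), FlaxBertModel(config, seed=0))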
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
def binary_or(a: int, b: int) -> str:
    """
    Take in two integers, convert them to binary and return the binary string that
    results from a bitwise OR of the two.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
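# Added illustrative check (not part of the original module): 25 = 0b11001 and
# 32 = 0b100000, so their bitwise OR is 0b111001, matching Python's own operator.
if __name__ == "__main__":
    assert binary_or(25, 32) == "0b111001"
    assert binary_or(25, 32) == bin(25 | 32)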
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return all primes up to and including num, via the Sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
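# Added illustrative check (not part of the original module): there are exactly
# 25 primes below 100, which the sieve should reproduce.
if __name__ == "__main__":
    assert len(prime_sieve_eratosthenes(100)) == 25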
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:  # gamma(0) is undefined, so skip the call when the user exits
            print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
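# Added illustrative checks (not part of the original module): for positive
# integers the gamma function reduces to a factorial, gamma(n) == (n - 1)!,
# and gamma(3/2) == sqrt(pi)/2.
if __name__ == "__main__":
    assert gamma(5) == 24.0  # 4! == 24
    assert abs(gamma(1.5) - 0.5 * sqrt(pi)) < 1e-12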
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowerCamelCase__ : list[float], lowerCamelCase__ : Tuple ) -> Any:
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(lowerCamelCase__ ):
print(f'''{i}\t\t{d}''' )
def _lowerCAmelCase ( lowerCamelCase__ : list[dict[str, int]], lowerCamelCase__ : list[float], lowerCamelCase__ : int ) -> Tuple:
for j in range(lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE : str = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def _lowerCAmelCase ( lowerCamelCase__ : list[dict[str, int]], lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : int ) -> list[float]:
_SCREAMING_SNAKE_CASE : Tuple = [float("inf" )] * vertex_count
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE : Dict = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
_SCREAMING_SNAKE_CASE : Dict = distance[u] + w
_SCREAMING_SNAKE_CASE : Union[str, Any] = check_negative_cycle(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ : Dict = int(input('''Enter number of vertices: ''').strip())
lowercase_ : Optional[int] = int(input('''Enter number of edges: ''').strip())
lowercase_ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase_ : str = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase_ : str = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase_ : Tuple = int(input('''\nEnter shortest path source:''').strip())
lowercase_ : int = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
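# Added non-interactive example (hypothetical three-vertex graph, for
# illustration only): edges 0->1 (w=4), 0->2 (w=1) and 2->1 (w=2) give shortest
# distances [0.0, 3.0, 1.0] from vertex 0, since 0->2->1 beats the direct edge.
if __name__ == "__main__":
    example_graph = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 1},
        {"src": 2, "dst": 1, "weight": 2},
    ]
    assert bellman_ford(example_graph, 3, 3, 0) == [0.0, 3.0, 1.0]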
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files", [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
], )
def _lowerCAmelCase ( lowerCamelCase__ : str, lowerCamelCase__ : str ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
_SCREAMING_SNAKE_CASE : Optional[Any] = DatasetInfosDict.from_directory(lowerCamelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"dataset_info", [
DatasetInfo(),
DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=4_2, ),
], )
def _lowerCAmelCase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : DatasetInfo ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = str(lowerCamelCase__ )
dataset_info.write_to_directory(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : int = DatasetInfo.from_directory(lowerCamelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCamelCase__, "dataset_info.json" ) )
def _lowerCAmelCase ( ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : str = DatasetInfo(
description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 4_2}], download_checksums={}, download_size=1_3_3_7, post_processing_size=4_4_2, dataset_size=1_2_3_4, size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4, )
_SCREAMING_SNAKE_CASE : List[str] = dataset_info._to_yaml_dict()
assert sorted(lowerCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
_SCREAMING_SNAKE_CASE : Optional[Any] = yaml.safe_dump(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : List[Any] = yaml.safe_load(lowerCamelCase__ )
assert dataset_info_yaml_dict == reloaded
def _lowerCAmelCase ( ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = DatasetInfo()
_SCREAMING_SNAKE_CASE : List[Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict", [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=4_2, )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
], )
def _lowerCAmelCase ( lowerCamelCase__ : Any, lowerCamelCase__ : DatasetInfosDict ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : str = str(lowerCamelCase__ )
dataset_infos_dict.write_to_directory(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : str = DatasetInfosDict.from_directory(lowerCamelCase__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_SCREAMING_SNAKE_CASE : List[Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_SCREAMING_SNAKE_CASE : Dict = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCamelCase__, "README.md" ) )
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings used by the BPE code."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
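# Minimal illustrative usage (added sketch, kept as comments since it downloads
# files from the Hugging Face Hub and so needs network access):
# tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
# ids = tokenizer(" Sample input sentence")["input_ids"]
# print(tokenizer.decode(ids))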
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Configure the model's quantizers; called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V share one GEMM."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recalibrate weight amaxes by taking the max of the weights."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the model's quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's named quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
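# Minimal illustrative wiring of this module (added sketch, kept as comments;
# the argv list is a hypothetical example and `model` stands in for a
# QDQBERT-style model containing TensorQuantizer submodules):
# import argparse
# parser = argparse.ArgumentParser()
# add_arguments(parser)
# args = parser.parse_args(["--wprec", "8", "--aprec", "8", "--quant-per-tensor"])
# set_default_quantizers(args)  # must run before the quantized model is built
# configure_model(model, args)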
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element of the array, O(n^2).

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow() but uses enumerate() and slicing.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the NGE for each element in a single right-to-left pass with a stack, O(n).

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Compute the Levenshtein edit distance with top-down memoized recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all remaining from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all remaining from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
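# Added illustrative check (not part of the original module): transforming
# "intention" into "execution" takes five single-character edits, the classic
# textbook example for edit distance.
if __name__ == "__main__":
    assert min_distance_up_bottom("intention", "execution") == 5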
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings used by the BPE code."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
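# Minimal illustrative usage (added sketch, kept as comments since it downloads
# vocabulary files from the Hugging Face Hub):
# tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
# print(tokenizer("Hello world")["input_ids"])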
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
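# Minimal illustrative usage of the API re-exported above (added sketch, kept as
# comments; `model`, `optimizer` and `dataloader` stand in for real objects):
# accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)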
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling-hash matching."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase_ :
def __init__( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Optional[Any]:
if k in (0.0_4, 0.0_6):
UpperCAmelCase_ : Any = k
UpperCAmelCase_ : List[Any] = window_size
else:
raise ValueError("invalid k value" )
def __str__( self : List[str] ) -> Tuple:
return str(self.k )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
UpperCAmelCase_ : Optional[Any] = img.shape
UpperCAmelCase_ : list[list[int]] = []
UpperCAmelCase_ : Tuple = img.copy()
UpperCAmelCase_ : int = cva.cvtColor(_SCREAMING_SNAKE_CASE , cva.COLOR_GRAY2RGB )
UpperCAmelCase_ : int = np.gradient(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = dx**2
UpperCAmelCase_ : Union[str, Any] = dy**2
UpperCAmelCase_ : Optional[int] = dx * dy
UpperCAmelCase_ : Optional[Any] = 0.0_4
UpperCAmelCase_ : int = self.window_size // 2
for y in range(_SCREAMING_SNAKE_CASE , h - offset ):
for x in range(_SCREAMING_SNAKE_CASE , w - offset ):
UpperCAmelCase_ : str = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ : List[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ : Optional[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ : List[Any] = (wxx * wyy) - (wxy**2)
UpperCAmelCase_ : List[str] = wxx + wyy
UpperCAmelCase_ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ = HarrisCorner(0.04, 3)
lowerCamelCase_ = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = False
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
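# --- Illustrative sketch, not part of the sample above ---
# The sentinel handling above maps <extra_id_0> ... <extra_id_{N-1}> onto the
# highest ids of the vocabulary: <extra_id_0> -> vocab_size - 1, and so on.
# The helpers below replay that arithmetic in isolation; vocab_size is an
# assumed example value (32000 sentencepiece pieces + 100 extra ids).
import re

def sentinel_token_to_id(token: str, vocab_size: int) -> int:
    match = re.match(r"<extra_id_(\d+)>", token)
    assert match is not None, "not a sentinel token"
    return vocab_size - int(match.group(1)) - 1  # mirrors vocab_size - num - 1

def sentinel_id_to_token(index: int, vocab_size: int) -> str:
    return f"<extra_id_{vocab_size - 1 - index}>"

assert sentinel_token_to_id("<extra_id_0>", 32100) == 32099
assert sentinel_id_to_token(32099, 32100) == "<extra_id_0>"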
| 511 | 0 |
__A = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
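# --- Illustrative sketch, not part of the table above ---
# A pin table like the one above is usually consumed by looking bare package
# names up to recover their full version specifiers (e.g. when generating
# install_requires entries). `deps` below is a small assumed subset.
deps = {
    "torch": "torch>=1.4",
    "transformers": "transformers>=4.25.1",
    "numpy": "numpy",
}

def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]

print(deps_list("torch", "transformers"))  # ['torch>=1.4', 'transformers>=4.25.1']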
| 716 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( A_ , A_ , A_ ):
@register_to_config
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ) -> List[str]:
super().__init__()
__a = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
__a = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
__a = False
__a = nn.Dropout(p=lowerCamelCase_ )
__a = TaConfig(
vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
__a = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
__a = TaBlock(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
__a = TaLayerNorm(lowerCamelCase_ )
__a = nn.Dropout(p=lowerCamelCase_ )
def lowerCAmelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int ) -> Tuple:
__a = self.token_embedder(lowerCamelCase_ )
__a = encoder_input_tokens.shape[1]
__a = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase_ )
__a = self.dropout_pre(lowerCamelCase_ )
        # invert the attention mask so padded positions are excluded from attention
__a = encoder_input_tokens.size()
__a = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
for lyr in self.encoders:
__a = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
__a = self.layer_norm(lowerCamelCase_ )
return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
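# --- Illustrative sketch, not part of the model above ---
# The forward pass above leans on ModuleUtilsMixin.get_extended_attention_mask,
# which broadcasts a (batch, seq) padding mask to (batch, 1, 1, seq) and turns
# zeros into a large negative bias so masked positions vanish under softmax.
# A minimal stand-in, assuming float32:
import torch

def extended_attention_mask(mask: torch.Tensor) -> torch.Tensor:
    ext = mask[:, None, None, :].to(torch.float32)  # (batch, 1, 1, seq)
    return (1.0 - ext) * torch.finfo(torch.float32).min

mask = torch.tensor([[1, 1, 1, 0]])  # last position is padding
print(extended_attention_mask(mask).shape)  # torch.Size([1, 1, 1, 4])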
| 173 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A :
snake_case__ :Dict = None
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = os.path.join(__magic_name__ , "feat_extract.json" )
feat_extract_first.to_json_file(__magic_name__ )
lowerCAmelCase__ = self.feature_extraction_class.from_json_file(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
lowerCAmelCase__ = self.feature_extraction_class.from_pretrained(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class()
self.assertIsNotNone(__magic_name__ )
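# --- Illustrative sketch, not part of the test mixin above ---
# The save/load round-trip the mixin checks reduces to: dump a config dict to
# JSON, reload it, compare. A dependency-free miniature of that contract,
# with assumed example values:
import json
import os
import tempfile

feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}
with tempfile.TemporaryDirectory() as tmpdirname:
    path = os.path.join(tmpdirname, "feat_extract.json")
    with open(path, "w") as f:
        json.dump(feat_extract_dict, f)
    with open(path) as f:
        assert json.load(f) == feat_extract_dict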
| 48 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = BertJapaneseTokenizer
UpperCAmelCase_ = False
UpperCAmelCase_ = True
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
super().setUp()
_lowercase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
_lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self : Dict , UpperCamelCase : Dict ):
"""simple docstring"""
_lowercase : List[str] = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowercase : List[str] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase : Tuple ):
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_input_output_texts(UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_lowercase : Tuple = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase )
return text, ids
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_lowercase : str = self.tokenizer_class(self.vocab_file )
_lowercase : List[str] = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCamelCase )
_lowercase : Any = '''こんにちは、世界。\nこんばんは、世界。'''
_lowercase : List[Any] = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowercase : Any = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase , '''wb''' ) as handle:
pickle.dump(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as handle:
_lowercase : int = pickle.load(UpperCamelCase )
_lowercase : Tuple = tokenizer_new.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_lowercase : str = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
try:
_lowercase : List[str] = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
try:
_lowercase : Any = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Tuple = MecabTokenizer(do_lower_case=UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
try:
_lowercase : Tuple = MecabTokenizer(
do_lower_case=UpperCamelCase , normalize_text=UpperCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : Dict = MecabTokenizer(normalize_text=UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCamelCase )
_lowercase : Dict = '''こんにちは、世界。\nこんばんは、世界。'''
_lowercase : Dict = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowercase : Union[str, Any] = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase , '''wb''' ) as handle:
pickle.dump(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as handle:
_lowercase : Tuple = pickle.load(UpperCamelCase )
_lowercase : Tuple = tokenizer_new.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_sudachi
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : Dict = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : List[str] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_lowercase : Dict = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : List[str] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : str = SudachiTokenizer(do_lower_case=UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_lowercase : Optional[Any] = SudachiTokenizer(normalize_text=UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : Optional[Any] = SudachiTokenizer(trim_whitespace=UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCamelCase )
_lowercase : Optional[int] = '''こんにちは、世界。\nこんばんは、世界。'''
_lowercase : str = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowercase : Tuple = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase , '''wb''' ) as handle:
pickle.dump(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as handle:
_lowercase : List[Any] = pickle.load(UpperCamelCase )
_lowercase : Any = tokenizer_new.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_jumanpp
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : Dict = JumanppTokenizer(do_lower_case=UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : Dict = JumanppTokenizer(normalize_text=UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : Optional[int] = JumanppTokenizer(trim_whitespace=UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_lowercase : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
_lowercase : int = {}
for i, token in enumerate(UpperCamelCase ):
_lowercase : Optional[Any] = i
_lowercase : str = WordpieceTokenizer(vocab=UpperCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : List[Any] = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
_lowercase : Dict = tokenizer.subword_tokenizer
_lowercase : Dict = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCamelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
_lowercase : int = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCamelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : str = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
_lowercase : str = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCamelCase )
_lowercase : List[Any] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_lowercase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = BertJapaneseTokenizer
UpperCAmelCase_ = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
_lowercase : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
_lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self : Optional[int] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCamelCase )
def lowerCAmelCase_ ( self : List[str] , UpperCamelCase : List[Any] ):
"""simple docstring"""
_lowercase : Tuple = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowercase : List[str] = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
_lowercase : List[Any] = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCamelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_lowercase : List[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
_lowercase : int = {}
for i, token in enumerate(UpperCamelCase ):
_lowercase : Dict = i
_lowercase : List[str] = CharacterTokenizer(vocab=UpperCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
_lowercase : Tuple = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCamelCase )
_lowercase : Optional[Any] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCamelCase )
_lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_lowercase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_lowercase : int = '''cl-tohoku/bert-base-japanese'''
_lowercase : Any = AutoTokenizer.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Any = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
_lowercase : Any = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
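# --- Illustrative sketch, not part of the tests above ---
# The WordpieceTokenizer behaviour exercised above is greedy longest-match
# over a single word: take the longest vocabulary entry that prefixes the
# remaining text (continuations prefixed with "##"), or emit [UNK] if no
# prefix matches. A minimal single-word reimplementation under that reading:
def wordpiece_word(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk]  # the whole word becomes [UNK], as in the tests
        tokens.append(current)
        start = end
    return tokens

vocab = {"こん", "##ばんは", "こんにちは"}
print(wordpiece_word("こんばんは", vocab))      # ['こん', '##ばんは']
print(wordpiece_word("こんばんにちは", vocab))  # ['[UNK]']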
| 322 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _UpperCAmelCase ):
a : List[Any] = (DDPMScheduler,)
def _snake_case ( self : List[str] , **__UpperCamelCase : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__UpperCamelCase )
return config
def _snake_case ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def _snake_case ( self : int ) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def _snake_case ( self : int ) ->Union[str, Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def _snake_case ( self : int ) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=__UpperCamelCase )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
_UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
_UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
_UpperCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__UpperCamelCase ):
if i == len(__UpperCamelCase ) - 1:
_UpperCAmelCase = -1
else:
_UpperCAmelCase = timesteps[i + 1]
_UpperCAmelCase = scheduler.previous_timestep(__UpperCamelCase )
_UpperCAmelCase = prev_t.item()
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(__UpperCamelCase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def _snake_case ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [1_00, 87, 50, 1, 0]
_UpperCAmelCase = len(__UpperCamelCase )
with self.assertRaises(__UpperCamelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __UpperCamelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
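# --- Illustrative sketch, not part of the tests above ---
# The scheduler.step() calls exercised above implement the DDPM posterior
#   x_{t-1} = (x_t - beta_t / sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_t) + sigma_t * z.
# A bare numpy version of one "epsilon"-prediction step under the linear beta
# schedule from the config dict above (variance choice assumed "fixed_large"):
import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)
alphas = 1.0 - betas
alphas_bar = np.cumprod(alphas)

def ddpm_step(x_t, eps_pred, t, rng):
    mean = (x_t - betas[t] / np.sqrt(1.0 - alphas_bar[t]) * eps_pred) / np.sqrt(alphas[t])
    if t == 0:
        return mean
    return mean + np.sqrt(betas[t]) * rng.standard_normal(x_t.shape)

rng = np.random.default_rng(0)
x = rng.standard_normal(4)
print(ddpm_step(x, rng.standard_normal(4), t=999, rng=rng).shape)  # (4,)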
| 19 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
for step in range(_A ): # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2 (checked below via the squared magnitude a*a + b*b > 4)
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(_A ):
for image_y in range(_A ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCAmelCase = get_distance(_A , _A , _A )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCAmelCase = get_color_coded_rgb(_A )
else:
_UpperCAmelCase = get_black_and_white_rgb(_A )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
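# --- Illustrative sketch, not part of the sample above ---
# The escape-time loop above normalises the iteration count to step / (max_step - 1):
# close to 0 for points that diverge immediately and exactly 1 for points that
# never leave the radius-2 disk. Standalone check of both extremes:
def escape_time(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    for step in range(max_step):  # noqa: B007
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

print(escape_time(0.0, 0.0, 50))  # 1.0 -> (0, 0) is inside the set
print(escape_time(2.0, 2.0, 50))  # 0.0 -> diverges on the first iteration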
| 19 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
_UpperCAmelCase = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
_UpperCAmelCase = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
_UpperCAmelCase = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1e-3 ) )
@slow
def _a ( self ) -> str:
_UpperCAmelCase = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
_UpperCAmelCase = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
_UpperCAmelCase = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
_UpperCAmelCase = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1e-3 ) )
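# --- Illustrative sketch, not part of the tests above ---
# The assertions above compare only a slice of the hidden states,
# output[:, :, -1] (the last feature of every position), against hard-coded
# references within an absolute tolerance. The pattern in isolation:
import torch

output = torch.zeros(1, 12, 768)
expected_slice = torch.zeros(1, 12)  # shape after taking [:, :, -1]
assert output[:, :, -1].shape == expected_slice.shape
assert torch.allclose(output[:, :, -1], expected_slice, atol=1e-3)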
| 657 |
import numpy as np
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case = 1E-12 , __snake_case = 100 , ):
assert np.shape(__snake_case )[0] == np.shape(__snake_case )[1]
# Ensure proper dimensionality.
assert np.shape(__snake_case )[0] == np.shape(__snake_case )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__snake_case ) == np.iscomplexobj(__snake_case )
__lowerCAmelCase = np.iscomplexobj(__snake_case )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__snake_case , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__lowerCAmelCase = False
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 1E12
while not convergence:
# Multiple matrix by the vector.
__lowerCAmelCase = np.dot(__snake_case , __snake_case )
# Normalize the resulting output vector.
__lowerCAmelCase = w / np.linalg.norm(__snake_case )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__lowerCAmelCase = vector.conj().T if is_complex else vector.T
__lowerCAmelCase = np.dot(__snake_case , np.dot(__snake_case , __snake_case ) )
# Check convergence.
__lowerCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__lowerCAmelCase = True
__lowerCAmelCase = lambda_
if is_complex:
__lowerCAmelCase = np.real(lambda_ )
return lambda_, vector
def __lowerCAmelCase ( ):
__lowerCAmelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__lowerCAmelCase = np.array([41, 4, 20] )
__lowerCAmelCase = real_input_matrix.astype(np.complexaaa )
__lowerCAmelCase = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__lowerCAmelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__lowerCAmelCase = real_input_matrix
__lowerCAmelCase = real_vector
elif problem_type == "complex":
__lowerCAmelCase = complex_input_matrix
__lowerCAmelCase = complex_vector
# Our implementation.
__lowerCAmelCase , __lowerCAmelCase = power_iteration(__snake_case , __snake_case )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(__snake_case )
# Last eigenvalue is the maximum one.
__lowerCAmelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__lowerCAmelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(__snake_case ) - np.abs(__snake_case ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
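# --- Illustrative sketch, not part of the sample above ---
# The test above reads eigen_values[-1] as the dominant eigenvalue because
# np.linalg.eigh returns the eigenvalues of a Hermitian matrix in ascending
# order. Quick standalone confirmation:
import numpy as np

m = np.array([[2.0, 0.0], [0.0, 5.0]])
vals, vecs = np.linalg.eigh(m)
print(vals)         # [2. 5.] -- ascending, so vals[-1] is the largest
print(vecs[:, -1])  # the matching eigenvector lives in the last column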
| 367 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
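# --- Illustrative sketch, not part of the script above ---
# The dynamic_axes mapping handed to torch.onnx.export above marks which
# tensor dimensions stay symbolic in the exported graph, so the ONNX model
# accepts varying batch sizes and sequence lengths at runtime:
dynamic_axes = {
    "input_ids": {0: "batch", 1: "seq"},
    "output_ids": {0: "batch", 1: "seq_out"},
}
for name, axes in dynamic_axes.items():
    print(name, "->", ", ".join(f"dim {d} = '{label}'" for d, label in axes.items()))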
| 639 |
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
while second != 0:
_UpperCAmelCase = first & second
first ^= second
_UpperCAmelCase = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 639 | 1 |
import sys
def lowerCAmelCase_ (lowercase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = len(lowercase__ )
lowerCAmelCase__ = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
lowerCAmelCase__ = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
for chain_length in range(2 , lowercase__ ):
for a in range(1 , n - chain_length + 1 ):
lowerCAmelCase__ = a + chain_length - 1
lowerCAmelCase__ = sys.maxsize
for c in range(lowercase__ , lowercase__ ):
lowerCAmelCase__ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowerCAmelCase__ = cost
lowerCAmelCase__ = c
return matrix, sol
def lowerCAmelCase_ (lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if i == j:
print('''A''' + str(lowercase__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(lowercase__ , lowercase__ , optimal_solution[i][j] )
print_optiomal_solution(lowercase__ , optimal_solution[i][j] + 1 , lowercase__ )
print(''')''' , end=''' ''' )
def lowerCAmelCase_ () -> str:
'''simple docstring'''
lowerCAmelCase__ = [30, 35, 15, 5, 10, 20, 25]
lowerCAmelCase__ = len(lowercase__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowerCAmelCase__ , lowerCAmelCase__ = matrix_chain_order(lowercase__ )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(lowercase__ , 1 , n - 1 )
if __name__ == "__main__":
main()
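# --- Illustrative sketch, not part of the sample above ---
# The DP above fills matrix[a][b] with the cheapest split point c of
#   matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b],
# i.e. the cost of multiplying the two sub-chains plus the cost of the final
# (array[a-1] x array[c]) @ (array[c] x array[b]) product. In the two-matrix
# base case this collapses to one product of the boundary dimensions:
array = [30, 35, 15]  # A1 is 30x35, A2 is 35x15
print(array[0] * array[1] * array[2])  # 15750 scalar multiplications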
| 668 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase : str = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_UpperCAmelCase : List[str] = {
"ctrl": 256,
}
_UpperCAmelCase : int = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = set()
lowerCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ = char
lowerCAmelCase__ = set(lowercase__ )
return pairs
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :int = VOCAB_FILES_NAMES
UpperCamelCase_ :str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :Optional[int] = CONTROL_CODES
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , **SCREAMING_SNAKE_CASE_ : Tuple ):
super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase__ = [tuple(merge.split() ) for merge in merges]
lowerCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCAmelCase__ = {}
@property
def __snake_case ( self : List[str] ):
return len(self.encoder )
def __snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Any ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCAmelCase__ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = word[:-4]
lowerCAmelCase__ = word
return word
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = []
lowerCAmelCase__ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
return split_tokens
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any ):
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase__ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
lowerCAmelCase__ = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase__ = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
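# NOTE: `bpe` above depends on a module-level `get_pairs` helper that sits outside
# this excerpt. A minimal sketch, assuming the conventional implementation used by
# HuggingFace-style BPE tokenizers (the set of adjacent symbol pairs in a word):
def get_pairs(word):
    """Return the set of symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs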
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : Optional[int] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def a__ ( ):
"""simple docstring"""
_snake_case : Dict = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : Union[str, Any] = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def a__ ( a : int=None ):
"""simple docstring"""
if subparsers is not None:
_snake_case : Any = subparsers.add_parser("config" , description=a )
else:
_snake_case : Optional[Any] = argparse.ArgumentParser("Accelerate config command" , description=a )
parser.add_argument(
"--config_file" , default=a , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=a )
return parser
def a__ ( a : Optional[Any] ):
"""simple docstring"""
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[Any] = args.config_file
else:
if not os.path.isdir(a ):
os.makedirs(a )
_snake_case : Optional[Any] = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(a )
else:
config.to_yaml_file(a )
print(f'accelerate configuration saved at {config_file}' )
def a__ ( ):
"""simple docstring"""
_snake_case : Optional[int] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(a )
if __name__ == "__main__":
main()
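# Illustrative shell usage of the command wired up above (not part of the module):
#   $ accelerate config                                  # interactive prompts
#   $ accelerate config --config_file ./my_config.yaml   # explicit save location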
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
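# A minimal, self-contained sketch of the padding-stripping step in `_decode_audio`
# above, on toy numpy data (values and shapes are made up for illustration;
# `padding_value = 0` mirrors a typical feature extractor):
if __name__ == "__main__":
    padding_value = 0
    audio_values = np.arange(12.0).reshape(2, 1, 6)  # (bsz=2, channels=1, seq_len=6)
    padding_mask = np.array([[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1]])  # 0 marks padding
    for i in range(2):
        sliced = audio_values[i][padding_mask[i][None, :] != padding_value]
        print(sliced.reshape(1, -1))  # row 0 keeps 4 samples, row 1 keeps all 6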
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
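# Typical consumer-side usage of the helpers above (this module is what is
# exposed as `transformers.utils.logging`):
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("INFO-level messages are now visible")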
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm: deadlock avoidance by simulating a safe execution order."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the allocated resources column-wise, one total per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus what is already allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need list."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety check, executing processes in a safe order if one exists."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align display of the algorithm's data tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
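# Example run on the module-level test data above; passing any truthy keyword
# (e.g. `describe=True`, as in the upstream version of this script) first prints
# the allocation tables, then executes processes in a safe order if one exists:
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)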
class Graph:
    """Weighted undirected graph stored as an adjacency map of maps."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so the minimum spanning tree is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return the vertices of the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute the minimum spanning tree of `graph` with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # cheapest outgoing edge per component, -1 while unknown
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
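# Example usage of the classes above (a sketch; any hashable vertex labels work):
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)])
    g.distinct_weight()  # ensure a unique MST
    mst = Graph.boruvka_mst(g)
    print(mst)  # prints the MST edges in `head -> tail == weight` form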
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'

IMPORT_IN_FUNCTION = '\ndef foo():\n import os\n return False\n'

DEEPLY_NESTED_IMPORT = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'

TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'

TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'

MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'

EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'

GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'

MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'

MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
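# Note: `get_imports` filters out imports guarded by `try`/`except` blocks (the
# `bar`/`baz` imports above) while keeping everything else, including imports
# nested inside functions -- which is why every fixture parses to just ["os"].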
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command on the root transformers-cli parser."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
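# Illustrative invocation once registered on the root `transformers-cli` parser
# (flags match the parser defined in `register_subcommand` above):
#   $ transformers-cli train --train_data ./data.csv --task text_classification --output ./model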
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
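# Consumer side of the lazy module above: `from transformers.models.informer
# import InformerModel` resolves through `_import_structure`, so the heavy torch
# import only happens when a torch-backed symbol is actually accessed.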
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = '''owlvit_text_model'''
    def __init__(self , vocab_size=49408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49406 , eos_token_id=49407 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig(PretrainedConfig):
    model_type = '''owlvit_vision_model'''
    def __init__(self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig(PretrainedConfig):
    model_type = '''owlvit'''
    is_composition = True
    def __init__(self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
    @classmethod
    def from_text_vision_configs(cls , text_config: Dict , vision_config: Dict , **kwargs ):
        '''simple docstring'''
        config_dict = {}
        config_dict['text_config'] = text_config
        config_dict['vision_config'] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict(self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('attention_mask', {0: 'batch', 1: 'sequence'}),
            ] )
    @property
    def outputs(self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('logits_per_image', {0: 'batch'}),
                ('logits_per_text', {0: 'batch'}),
                ('text_embeds', {0: 'batch'}),
                ('image_embeds', {0: 'batch'}),
            ] )
    @property
    def atol_for_validation(self ):
        '''simple docstring'''
        return 1e-4
    def generate_dummy_inputs(self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , framework: Optional["TensorType"] = None , ):
        '''simple docstring'''
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}
    @property
    def default_onnx_opset(self ):
        '''simple docstring'''
        return 14
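# A minimal usage sketch (added; assumes this module is imported as part of transformers):
# text_cfg = OwlViTTextConfig()
# vision_cfg = OwlViTVisionConfig()
# config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
# print(config.projection_dim)  # 512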
| 47
|
from ..utils import DummyObject, requires_backends
class _UpperCamelCase(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 47
| 1
|
import string
from math import log10
def term_frequency(term: str , document: str ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
    tokenize_document = document_without_punctuation.split(' ' ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term: str , corpus: str ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n' )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency(df: int , n: int , smoothing: bool = False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.' )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError('df must be > 0' )
    elif n == 0:
        raise ValueError('log10(0) is undefined.' )
    return round(log10(n / df ) , 3 )
def tf_idf(tf: int , idf: int ) -> float:
    '''simple docstring'''
    return round(tf * idf , 3 )
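# A small worked example (added; the values follow directly from the definitions above):
if __name__ == "__main__":
    sample_corpus = "this is a sample\nthis is another example"
    print(term_frequency('this' , 'this is a sample' ) )  # 1
    print(document_frequency('this' , sample_corpus ) )  # (2, 2)
    print(inverse_document_frequency(2 , 2 ) )           # 0.0, since log10(2 / 2) == 0
    print(tf_idf(1 , 0.0 ) )                             # 0.0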
| 708
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''gptsan-japanese'''
    keys_to_ignore_at_inference = [
        '''past_key_values''',
    ]
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(self , vocab_size=36000 , max_position_embeddings=1280 , d_model=1024 , d_ff=8192 , d_ext=4096 , d_spout=128 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=128 , dropout_rate=0.0 , layer_norm_epsilon=1E-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.002 , output_router_logits=False , use_cache=True , separator_token_id=35998 , pad_token_id=35995 , eos_token_id=35999 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
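# A minimal usage sketch (added; assumes this module is importable as part of transformers):
# config = GPTSanJapaneseConfig(num_switch_layers=2, num_ext_layers=1)
# print(config.num_layers)  # 3, i.e. num_switch_layers + num_ext_layers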
| 505
| 0
|
'''simple docstring'''
def solution(length: int = 50 ) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
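    # Added sanity check (assumption: this implements Project Euler 116, whose
    # statement gives 12 colourings for a row of length five).
    assert solution(5) == 12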
| 44
|
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
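    # Added check against the known worked example (Project Euler 6):
    # for the first ten natural numbers the difference is 3025 - 385 = 2640.
    assert solution(10) == 2640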
| 371
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
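# A minimal usage sketch (added; needs the `tokenizers` backend and network access
# to fetch the files referenced above):
# tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
# print(tok("hello world")["input_ids"])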
| 132
| 0
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=True , )
        assert hasattr(self , 'env' )
    def create_estimator(self , instance_count=1 ):
        """simple docstring"""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
    def save_results_as_csv(self , job_name ):
        """simple docstring"""
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    def test_glue(self ):
        """simple docstring"""
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
| 365
|
def is_palindrome(num: int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
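    # Added quick demonstration (not in the original file):
    print(is_palindrome(121), is_palindrome(123))  # True False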
| 454
| 0
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self , value_function: UNet1DModel , unet: UNet1DModel , scheduler: DDPMScheduler , env , ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self , x_in , key ):
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize(self , x_in , key ):
        return x_in * self.stds[key] + self.means[key]
    def to_torch(self , x_in ):
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_x0(self , x_in , cond , act_dim ):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self , x , conditions , n_guide_steps , scale ):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__(self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , "observations" )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_x0(x1 , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key="actions" )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
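# A minimal usage sketch (added; `env` is assumed to be a D4RL-style environment
# exposing get_dataset(), observation_space and action_space):
# pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
# action = pipeline(env.reset(), planning_horizon=32)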
| 489
|
'''simple docstring'''
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ):
    '''simple docstring'''
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ):
    '''simple docstring'''
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]] , max_colors: int ):
    '''simple docstring'''
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
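# Added demo (the 4-cycle adjacency matrix below is a hypothetical example,
# not from the original file):
if __name__ == "__main__":
    four_cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(four_cycle , 2 ) )  # [0, 1, 0, 1], a valid 2-coloring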
| 489
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A__ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 16000 ):
    # Randomly sample a chunk of `max_length` seconds from the input waveform.
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(
        default=None ,metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    eval_file: Optional[str] = field(
        default=None ,metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    train_split_name: str = field(
        default='''train''' ,metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } ,)
    eval_split_name: str = field(
        default='''validation''' ,metadata={
            '''help''': (
                '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } ,)
    audio_column_name: str = field(
        default='''audio''' ,metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} ,)
    label_column_name: str = field(
        default='''label''' ,metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    max_train_samples: Optional[int] = field(
        default=None ,metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } ,)
    max_eval_samples: Optional[int] = field(
        default=None ,metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } ,)
    max_length_seconds: float = field(
        default=20 ,metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} ,)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' ,metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ,)
    config_name: Optional[str] = field(
        default=None ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    model_revision: str = field(
        default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,)
    feature_extractor_name: Optional[str] = field(
        default=None ,metadata={'''help''': '''Name or path of preprocessor config.'''} )
    freeze_feature_encoder: bool = field(
        default=True ,metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    attention_mask: bool = field(
        default=True ,metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    use_auth_token: bool = field(
        default=False ,metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } ,)
    freeze_feature_extractor: Optional[bool] = field(
        default=None ,metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    ignore_mismatched_sizes: bool = field(
        default=False ,metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} ,)
    def __post_init__(self ):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                '''The argument `--freeze_feature_extractor` is deprecated and '''
                '''will be removed in a future version. Use `--freeze_feature_encoder`'''
                '''instead. Setting `freeze_feature_encoder==True`.''' , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                '''The argument `--freeze_feature_extractor` is deprecated and '''
                '''should not be used in combination with `--freeze_feature_encoder`.'''
                '''Only make use of `--freeze_feature_encoder`.''')
def UpperCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_audio_classification''' ,model_args ,data_args )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio['''array'''] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs ,sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs ,sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets['''train'''].features[data_args.label_column_name].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions ,axis=1 )
        return metric.compute(predictions=predictions ,references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path ,num_labels=len(labels ) ,label2id=label2id ,id2label=id2label ,finetuning_task='''audio-classification''' ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=config ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms ,output_all_columns=False )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms ,output_all_columns=False )
    # Initialize our trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=raw_datasets['''train'''] if training_args.do_train else None ,eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None ,compute_metrics=compute_metrics ,tokenizer=feature_extractor ,)
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' ,train_result.metrics )
trainer.save_metrics('''train''' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' ,metrics )
        trainer.save_metrics('''eval''' ,metrics )
# Write model card and (optionally) push to hub
    kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 171
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 171
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    """simple docstring"""
    def __init__(self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self ):
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__(self , other ):
        return self.f_cost < other.f_cost
class AStar:
    """simple docstring"""
    def __init__(self , start , goal ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self ):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors(self , parent ):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path(self , node ):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """simple docstring"""
    def __init__(self , start , goal ):
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search(self ):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self , fwd_node , bwd_node ):
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
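    # Added demonstration (not in the original): show the two computed paths as
    # lists of (y, x) positions from `init` to `goal`.
    print(f'''AStar path: {path}''')
    print(f'''BidirectionalAStar path: {bd_path}''')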
| 209
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/imagegpt-small': '',
    'openai/imagegpt-medium': '',
    'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """imagegpt"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__(self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self ):
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
            ] )
    def generate_dummy_inputs(self , preprocessor: "FeatureExtractionMixin" , batch_size: int = 1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 32 , image_height: int = 32 , ):
        input_images = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_images , return_tensors=framework ) )
        return inputs
| 209
| 1
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph , start ):
    '''simple docstring'''
    explored , stack = set() , [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
| 672
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'vit_msn'
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
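# A minimal usage sketch (added; assumes this module is importable as part of transformers):
# config = ViTMSNConfig(image_size=96, patch_size=8)
# print(config.num_hidden_layers)  # 12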
| 161
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        """simple docstring"""
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past , output_from_no_past , rtol=1e-3 )
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
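# Illustrative note (not part of the original test file): with the config's
# pad_token_id, an `input_ids` row like [5, 6, pad] yields an attention mask of
# [1, 1, 0] through tf.math.not_equal above, so padded positions are ignored.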
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowercase = """google/pegasus-xsum"""
@cached_property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase_ ( self : str , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.translate_src_text(**__magic_name__ )
assert self.expected_text == generated_words
def lowerCamelCase_ ( self : Tuple , **__magic_name__ : str ):
"""simple docstring"""
UpperCamelCase = self.tokenizer(self.src_text , **__magic_name__ , padding=__magic_name__ , return_tensors="""tf""" )
UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__magic_name__ , )
UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__magic_name__ )
return generated_words
@slow
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb a staircase of `number_of_steps`
    steps, taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    >>> climb_stairs(-7)
    Traceback (most recent call last):
        ...
    AssertionError: number_of_steps needs to be positive integer, your input -7
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
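

# A quick cross-check of the recurrence above (illustrative, not part of the
# original module): the number of ways to climb n steps is the (n + 1)-th
# Fibonacci number.
def _fib(n: int) -> int:
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a


if __name__ == "__main__":
    assert all(climb_stairs(n) == _fib(n + 1) for n in range(1, 10))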
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
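# Worked offset example (sketch, based on the alignment table in __init__): the
# spm model maps "," to id 3, so _convert_token_to_id returns 3 + fairseq_offset
# == 4, matching fairseq, while ids 0-3 stay reserved for "<s>", "<pad>",
# "</s>" and "<unk>".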
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
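# Typical usage pattern exercised by the tests above (sketch; `module` and
# `mock_join` are hypothetical stand-ins):
#
#     with patch_submodule(module, "os.path.join", mock_join):
#         ...  # every alias of os.path.join inside `module` now resolves to mock_join
#
# On exit the original attributes are restored, as the assertions above verify.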
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1_536,
"junnyu/roformer_chinese_base": 1_536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
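# Minimal usage sketch (checkpoint name taken from the map above; the token
# split shown is illustrative of jieba-based pre-tokenization):
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# tokenizer.tokenize("今天天气非常好。")  # e.g. ['今', '天', '天', '气', '非常', '好', '。']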
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
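# Expected behaviour (illustrative): X|0> = |1> on each qubit, so with an ideal
# simulator every one of the 1000 shots collapses to '11' and the printed
# counts are {'11': 1000}.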
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
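# What the lazy module buys us (illustrative): `from <package> import MMBTConfig`
# stays cheap because `modeling_mmbt` -- and therefore torch -- is only imported
# on first access to MMBTModel / MMBTForClassification.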
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute the product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
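# Worked example (illustrative, not part of the original script): for the
# tabulated squares y = 0, 1, 4, 9 at x = 0, 1, 2, 3, the forward differences
# are 1, 3, 5 (first order) and 2, 2 (second order). Interpolating at value = 2
# gives u = 2 and
#     summ = 0 + ucal(2, 1) * 1 / 1! + ucal(2, 2) * 2 / 2! = 2 + 2 = 4,
# which matches 2**2 exactly because the data is quadratic.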
"""simple docstring"""
deps = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
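# How this table is typically consumed (sketch, mirroring setup.py-style usage;
# the package names indexed here are just examples):
# install_requires = [deps["torch"], deps["transformers"], deps["numpy"]]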
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
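

# Round-trip sanity check against the standard library (illustrative, not part
# of the original module; `sample` is an arbitrary example value):
if __name__ == "__main__":
    import base64 as stdlib_base64

    sample = b"Algorithms are fun!"
    assert base64_encode(sample) == stdlib_base64.b64encode(sample)
    assert base64_decode(base64_encode(sample).decode()) == sample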
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """
    Evaluate how similar the item is to the target by counting
    each char in the right position.
    >>> evaluate("Helxo Worlx", "Hello World")
    ('Helxo Worlx', 9.0)
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
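# Smoke-test sketch (assumption: short targets converge almost instantly):
# generation, population, answer = basic("Hi!", list(" ABCHi!abc"), debug=False)
# assert answer == "Hi!"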
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
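# Minimal usage sketch (not from the original file): instantiate the default
# ViT-MSN configuration and override a single field.
# config = ViTMSNConfig(image_size=384)
# assert config.patch_size == 16 and config.image_size == 384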
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32)
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False)
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**__a)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = audioldm_pipe(**__a)
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a) == 2_56
_UpperCamelCase = audio[:10]
_UpperCamelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**__a)
_UpperCamelCase = audioldm_pipe.to(__a)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = 3 * [inputs['''prompt''']]
# forward
_UpperCamelCase = audioldm_pipe(**__a)
_UpperCamelCase = output.audios[0]
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = 3 * [inputs.pop('''prompt''')]
_UpperCamelCase = audioldm_pipe.tokenizer(
__a , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = text_inputs['''input_ids'''].to(__a)
_UpperCamelCase = audioldm_pipe.text_encoder(
__a , )
_UpperCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCamelCase = F.normalize(__a , dim=-1)
_UpperCamelCase = prompt_embeds
# forward
_UpperCamelCase = audioldm_pipe(**__a)
_UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**__a)
_UpperCamelCase = audioldm_pipe.to(__a)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = 3 * ['''this is a negative prompt''']
_UpperCamelCase = negative_prompt
_UpperCamelCase = 3 * [inputs['''prompt''']]
# forward
_UpperCamelCase = audioldm_pipe(**__a)
_UpperCamelCase = output.audios[0]
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = 3 * [inputs.pop('''prompt''')]
_UpperCamelCase = []
for p in [prompt, negative_prompt]:
_UpperCamelCase = audioldm_pipe.tokenizer(
__a , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = text_inputs['''input_ids'''].to(__a)
_UpperCamelCase = audioldm_pipe.text_encoder(
__a , )
_UpperCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCamelCase = F.normalize(__a , dim=-1)
embeds.append(__a)
_UpperCamelCase , _UpperCamelCase = embeds
# forward
_UpperCamelCase = audioldm_pipe(**__a)
_UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(skip_prk_steps=__a)
_UpperCamelCase = AudioLDMPipeline(**__a)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = '''egg cracking'''
_UpperCamelCase = audioldm_pipe(**__a , negative_prompt=__a)
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a) == 2_56
_UpperCamelCase = audio[:10]
_UpperCamelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(skip_prk_steps=__a)
_UpperCamelCase = AudioLDMPipeline(**__a)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
_UpperCamelCase = audioldm_pipe(__a , num_inference_steps=2).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_UpperCamelCase = 2
_UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
_UpperCamelCase = 2
_UpperCamelCase = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
_UpperCamelCase = 2
_UpperCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**__a)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **__a)
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a) / vocoder_sampling_rate == 0.016
_UpperCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **__a)
_UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a) / vocoder_sampling_rate == 0.032
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = AudioLDMPipeline(**__a)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = ['''hey''']
_UpperCamelCase = audioldm_pipe(__a , num_inference_steps=1)
_UpperCamelCase = output.audios.shape
assert audio_shape == (1, 2_56)
_UpperCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
        _UpperCamelCase = SpeechT5HifiGan(__a).to(__a)
_UpperCamelCase = audioldm_pipe(__a , num_inference_steps=1)
_UpperCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=__a)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCAmelCase ( self , __a , __a="cpu" , __a=torch.floataa , __a=0) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = torch.Generator(device=__a).manual_seed(__a)
_UpperCamelCase = np.random.RandomState(__a).standard_normal((1, 8, 1_28, 16))
_UpperCamelCase = torch.from_numpy(__a).to(device=__a , dtype=__a)
_UpperCamelCase = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''')
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_inputs(__a)
_UpperCamelCase = 25
_UpperCamelCase = audioldm_pipe(**__a).audios[0]
assert audio.ndim == 1
assert len(__a) == 8_19_20
_UpperCamelCase = audio[7_72_30:7_72_40]
_UpperCamelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
_UpperCamelCase = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-2
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''')
_UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
_UpperCamelCase = audioldm_pipe.to(__a)
audioldm_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_inputs(__a)
_UpperCamelCase = audioldm_pipe(**__a).audios[0]
assert audio.ndim == 1
assert len(__a) == 8_19_20
_UpperCamelCase = audio[2_77_80:2_77_90]
_UpperCamelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
_UpperCamelCase = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3e-2
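# End-to-end usage sketch for the pipeline exercised above (checkpoint id taken
# from the slow tests; the step count is an arbitrary example):
# pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
# audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]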
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
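# Usage sketch mirroring the integration test above (checkpoint from model_name):
# tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
# batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
# generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)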
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale( self ):
"""simple docstring"""
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=a__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
    def dummy_vae( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(a__ )
    def test_stable_diffusion_upscale( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Any = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE_ : Optional[int] = DDPMScheduler()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_vae
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE_ : str = Image.fromarray(np.uint8(a__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
SCREAMING_SNAKE_CASE_ : Any = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ : Dict = output.images
SCREAMING_SNAKE_CASE_ : Any = torch.Generator(device=a__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=a__ , )[0]
SCREAMING_SNAKE_CASE_ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : str = image_from_tuple[0, -3:, -3:, -1]
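        # The x4 upscaler quadruples the spatial resolution, so the 64x64 input
        # should come back as a 256x256 image.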
SCREAMING_SNAKE_CASE_ : Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_upscale_batch( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Any = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDPMScheduler()
SCREAMING_SNAKE_CASE_ : Optional[int] = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_vae
SCREAMING_SNAKE_CASE_ : str = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE_ : str = Image.fromarray(np.uint8(a__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ : List[str] = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ : Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ : int = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=a__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_upscale_fp16( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE_ : Optional[Any] = DDPMScheduler()
SCREAMING_SNAKE_CASE_ : int = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : str = self.dummy_vae
SCREAMING_SNAKE_CASE_ : str = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : Any = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE_ : Optional[int] = Image.fromarray(np.uint8(a__ ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE_ : Optional[Any] = unet.half()
SCREAMING_SNAKE_CASE_ : Dict = text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE_ : int = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
SCREAMING_SNAKE_CASE_ : int = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_inference_steps=2 , output_type='np' , ).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
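# Slow GPU integration tests: run the real stabilityai/stable-diffusion-x4-upscaler
# checkpoint against reference images hosted on the Hugging Face Hub.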
@slow
@require_torch_gpu
class _A ( unittest.TestCase):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
SCREAMING_SNAKE_CASE_ : List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionUpscalePipeline.from_pretrained(a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : Any = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE_ : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type='np' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
SCREAMING_SNAKE_CASE_ : List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE_ : str = StableDiffusionUpscalePipeline.from_pretrained(
            a__ , torch_dtype=torch.float16 , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : int = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type='np' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE_ : Any = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionUpscalePipeline.from_pretrained(
            a__ , torch_dtype=torch.float16 , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : str = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(
prompt=a__ , image=a__ , generator=a__ , num_inference_steps=5 , output_type='np' , )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
"""simple docstring"""
import math
from collections.abc import Callable
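# Secant method for root finding: starting from two guesses x0 and x1, iterate
#   x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n))
# until two successive iterates differ by less than 1e-5.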
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_12,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
'''simple docstring'''
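# Binomial coefficient C(n, r) via Pascal's rule: a single row of length r + 1 is
# updated in place from right to left, giving O(n * r) time and O(r) extra space.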
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
'''simple docstring'''
import numpy as np
import qiskit
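# BB84 quantum key distribution, simulated with Qiskit:
#   1. Alice encodes random bits in randomly chosen bases (Z or X).
#   2. Bob measures each qubit in his own randomly chosen basis.
#   3. Only the positions where both happened to pick the same basis are kept,
#      so extra qubits (6 * key_len) are prepared as a safety margin.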
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="""BB84""")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, """0""")
    return key
if __name__ == "__main__":
    print(f'The generated key is : {bb84(8, seed=0)}')
from doctest import testmod
testmod()
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def _lowerCamelCase ( UpperCAmelCase__=OBJECTS,UpperCAmelCase__=ATTRIBUTES ) -> Optional[Any]:
'''simple docstring'''
a__ = []
with open(UpperCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
a__ = []
with open(UpperCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def _lowerCamelCase ( UpperCAmelCase__ ) -> Any:
'''simple docstring'''
a__ = OrderedDict()
with open(UpperCAmelCase__,'rb' ) as f:
a__ = pkl.load(UpperCAmelCase__ )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
a__ = ckp.pop(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__,np.ndarray ):
a__ = torch.tensor(UpperCAmelCase__ )
else:
            assert isinstance(UpperCAmelCase__,torch.Tensor ), type(UpperCAmelCase__ )
a__ = v
return r
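# Minimal nested-configuration container: wraps a (possibly nested) dict, exposes
# keys as attributes, supports dotted-key assignment, YAML/JSON round-trips, and
# hub-style loading via from_pretrained/get_config_dict.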
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    _pointer = {}
    def __init__( self , dictionary : dict , name : str = "root" , level : int = 0 ) -> List[str]:
'''simple docstring'''
a__ = name
a__ = level
a__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
a__ = copy.deepcopy(_snake_case )
a__ = copy.deepcopy(_snake_case )
if isinstance(_snake_case , _snake_case ):
a__ = Config(_snake_case , name=_snake_case , level=level + 1 )
a__ = v
setattr(self , _snake_case , _snake_case )
a__ = d
def __repr__( self : Union[str, Any] ) -> int:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ) -> List[Any]:
'''simple docstring'''
a__ = val
a__ = val
a__ = key.split('.' )
a__ = len(_snake_case ) - 1
a__ = self._pointer
if len(_snake_case ) > 1:
for i, l in enumerate(_snake_case ):
if hasattr(self , _snake_case ) and isinstance(getattr(self , _snake_case ) , _snake_case ):
setattr(getattr(self , _snake_case ) , '.'.join(levels[i:] ) , _snake_case )
if l == last_level:
a__ = val
else:
a__ = pointer[l]
    def to_dict( self ) -> List[str]:
        '''simple docstring'''
        return self._pointer
    def dump_yaml( self , data , file_name ) -> Any:
        '''simple docstring'''
        with open(F'''{file_name}''' , 'w' ) as stream:
            dump(data , stream )
    def dump_json( self , data , file_name ) -> Optional[int]:
        '''simple docstring'''
        with open(F'''{file_name}''' , 'w' ) as stream:
            json.dump(data , stream )
@staticmethod
    def load_yaml( _snake_case : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_snake_case ) as stream:
a__ = load(_snake_case , Loader=_snake_case )
return data
def __str__( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ = ' '
if self._name != "root":
a__ = F'''{t * (self._level-1)}{self._name}:\n'''
else:
a__ = ''
a__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_snake_case , _snake_case ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_snake_case ).__name__})\n'''
a__ = level
return r[:-1]
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : str , **kwargs ) -> List[str]:
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
@classmethod
    def get_config_dict( cls , pretrained_model_name_or_path : str , **kwargs ) -> List[str]:
'''simple docstring'''
a__ = kwargs.pop('cache_dir' , _snake_case )
a__ = kwargs.pop('force_download' , _snake_case )
a__ = kwargs.pop('resume_download' , _snake_case )
a__ = kwargs.pop('proxies' , _snake_case )
a__ = kwargs.pop('local_files_only' , _snake_case )
if os.path.isdir(_snake_case ):
a__ = os.path.join(_snake_case , _snake_case )
elif os.path.isfile(_snake_case ) or is_remote_url(_snake_case ):
a__ = pretrained_model_name_or_path
else:
a__ = hf_bucket_url(_snake_case , filename=_snake_case , use_cdn=_snake_case )
try:
# Load from URL or cache if already cached
a__ = cached_path(
_snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
a__ = Config.load_yaml(_snake_case )
except EnvironmentError:
a__ = 'Can\'t load config for'
raise EnvironmentError(_snake_case )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_snake_case ), kwargs
def _lowerCamelCase ( UpperCAmelCase__ ) -> Any:
'''simple docstring'''
a__ = torch.load('dump.pt',map_location=in_tensor.device )
a__ = in_tensor.numpy()
a__ = out_tensor.numpy()[0]
print(na.shape,na[0, 0, :5] )
print(na.shape,na[0, 0, :5] )
assert np.allclose(UpperCAmelCase__,UpperCAmelCase__,rtol=0.01,atol=0.1 ), (
f'''{sum([1 for x in np.isclose(UpperCAmelCase__,UpperCAmelCase__,rtol=0.01,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %'''
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def _lowerCamelCase ( UpperCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ = urlparse(UpperCAmelCase__ )
return parsed.scheme in ("http", "https")
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__=True ) -> str:
'''simple docstring'''
a__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
a__ = '/' not in model_id
if legacy_format:
return f'''{endpoint}/{model_id}-{filename}'''
else:
return f'''{endpoint}/{model_id}/{filename}'''
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__=None,UpperCAmelCase__=0,UpperCAmelCase__=None,) -> Optional[Any]:
'''simple docstring'''
a__ = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(UpperCAmelCase__,UpperCAmelCase__ ):
ua += "; " + "; ".join('{}/{}'.format(UpperCAmelCase__,UpperCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(UpperCAmelCase__,UpperCAmelCase__ ):
ua += "; " + user_agent
a__ = {'user-agent': ua}
if resume_size > 0:
a__ = 'bytes=%d-' % (resume_size,)
a__ = requests.get(UpperCAmelCase__,stream=UpperCAmelCase__,proxies=UpperCAmelCase__,headers=UpperCAmelCase__ )
if response.status_code == 4_16: # Range not satisfiable
return
a__ = response.headers.get('Content-Length' )
a__ = resume_size + int(UpperCAmelCase__ ) if content_length is not None else None
a__ = tqdm(
unit='B',unit_scale=UpperCAmelCase__,total=UpperCAmelCase__,initial=UpperCAmelCase__,desc='Downloading',)
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(UpperCAmelCase__ ) )
temp_file.write(UpperCAmelCase__ )
progress.close()
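# get_from_cache resolves a URL to a local file: probe the remote ETag, reuse any
# cached copy when possible, otherwise download under a FileLock (optionally
# resuming a partial download) and record {url, etag} metadata next to the file.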
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__=None,UpperCAmelCase__=False,UpperCAmelCase__=None,UpperCAmelCase__=10,UpperCAmelCase__=False,UpperCAmelCase__=None,UpperCAmelCase__=False,) -> Dict:
'''simple docstring'''
if cache_dir is None:
a__ = TRANSFORMERS_CACHE
if isinstance(UpperCAmelCase__,UpperCAmelCase__ ):
a__ = str(UpperCAmelCase__ )
os.makedirs(UpperCAmelCase__,exist_ok=UpperCAmelCase__ )
a__ = None
if not local_files_only:
try:
a__ = requests.head(UpperCAmelCase__,allow_redirects=UpperCAmelCase__,proxies=UpperCAmelCase__,timeout=UpperCAmelCase__ )
if response.status_code == 2_00:
a__ = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
a__ = url_to_filename(UpperCAmelCase__,UpperCAmelCase__ )
# get cache path to put the file
a__ = os.path.join(UpperCAmelCase__,UpperCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(UpperCAmelCase__ ):
return cache_path
else:
a__ = [
file
for file in fnmatch.filter(os.listdir(UpperCAmelCase__ ),filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(UpperCAmelCase__ ) > 0:
return os.path.join(UpperCAmelCase__,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(UpperCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
a__ = cache_path + '.lock'
with FileLock(UpperCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(UpperCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
a__ = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(UpperCAmelCase__,'a+b' ) as f:
yield f
a__ = _resumable_file_manager
if os.path.exists(UpperCAmelCase__ ):
a__ = os.stat(UpperCAmelCase__ ).st_size
else:
a__ = 0
else:
a__ = partial(tempfile.NamedTemporaryFile,dir=UpperCAmelCase__,delete=UpperCAmelCase__ )
a__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s',UpperCAmelCase__,temp_file.name,)
http_get(
UpperCAmelCase__,UpperCAmelCase__,proxies=UpperCAmelCase__,resume_size=UpperCAmelCase__,user_agent=UpperCAmelCase__,)
os.replace(temp_file.name,UpperCAmelCase__ )
a__ = {'url': url, 'etag': etag}
a__ = cache_path + '.json'
with open(UpperCAmelCase__,'w' ) as meta_file:
json.dump(UpperCAmelCase__,UpperCAmelCase__ )
return cache_path
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__=None ) -> Tuple:
'''simple docstring'''
a__ = url.encode('utf-8' )
    a__ = sha256(UpperCAmelCase__ )
a__ = url_hash.hexdigest()
if etag:
a__ = etag.encode('utf-8' )
        a__ = sha256(UpperCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__=None,UpperCAmelCase__=False,UpperCAmelCase__=None,UpperCAmelCase__=False,UpperCAmelCase__=None,UpperCAmelCase__=False,UpperCAmelCase__=False,UpperCAmelCase__=False,) -> str:
'''simple docstring'''
if cache_dir is None:
a__ = TRANSFORMERS_CACHE
if isinstance(UpperCAmelCase__,UpperCAmelCase__ ):
a__ = str(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__,UpperCAmelCase__ ):
a__ = str(UpperCAmelCase__ )
if is_remote_url(UpperCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
a__ = get_from_cache(
UpperCAmelCase__,cache_dir=UpperCAmelCase__,force_download=UpperCAmelCase__,proxies=UpperCAmelCase__,resume_download=UpperCAmelCase__,user_agent=UpperCAmelCase__,local_files_only=UpperCAmelCase__,)
elif os.path.exists(UpperCAmelCase__ ):
# File, and it exists.
a__ = url_or_filename
elif urlparse(UpperCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(UpperCAmelCase__ ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(UpperCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(UpperCAmelCase__ ) and not tarfile.is_tarfile(UpperCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
a__ , a__ = os.path.split(UpperCAmelCase__ )
a__ = output_file.replace('.','-' ) + '-extracted'
a__ = os.path.join(UpperCAmelCase__,UpperCAmelCase__ )
if os.path.isdir(UpperCAmelCase__ ) and os.listdir(UpperCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
a__ = output_path + '.lock'
with FileLock(UpperCAmelCase__ ):
shutil.rmtree(UpperCAmelCase__,ignore_errors=UpperCAmelCase__ )
os.makedirs(UpperCAmelCase__ )
if is_zipfile(UpperCAmelCase__ ):
with ZipFile(UpperCAmelCase__,'r' ) as zip_file:
zip_file.extractall(UpperCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(UpperCAmelCase__ ):
a__ = tarfile.open(UpperCAmelCase__ )
tar_file.extractall(UpperCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(UpperCAmelCase__ ) )
return output_path_extracted
return output_path
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__="," ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(UpperCAmelCase__,UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ):
with open(UpperCAmelCase__ ) as f:
a__ = eval(f.read() )
else:
        req = requests.get(UpperCAmelCase__ )
try:
            a__ = req.json()
except Exception:
a__ = req.content.decode()
assert data is not None, "could not connect"
try:
a__ = eval(UpperCAmelCase__ )
except Exception:
a__ = data.split('\n' )
req.close()
return data
def _lowerCamelCase ( UpperCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
a__ = requests.get(UpperCAmelCase__ )
a__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _lowerCamelCase ( UpperCAmelCase__ ) -> str:
'''simple docstring'''
a__ = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(UpperCAmelCase__ )
with open(UpperCAmelCase__,'rb' ) as stream:
a__ = pkl.load(UpperCAmelCase__ )
a__ = weights.pop('model' )
a__ = {}
for k, v in model.items():
a__ = torch.from_numpy(UpperCAmelCase__ )
if "running_var" in k:
a__ = torch.tensor([0] )
a__ = k.replace('running_var','num_batches_tracked' )
a__ = zero
return new
def _lowerCamelCase ( ) -> int:
'''simple docstring'''
print(f'''{os.path.abspath(os.path.join(UpperCAmelCase__,os.pardir ) )}/demo.ipynb''' )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__="RGB" ) -> Dict:
'''simple docstring'''
assert isinstance(UpperCAmelCase__,UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ):
        a__ = cv2.imread(UpperCAmelCase__ )
else:
a__ = get_image_from_url(UpperCAmelCase__ )
assert img is not None, f'''could not connect to: {im}'''
    a__ = cv2.cvtColor(UpperCAmelCase__,cv2.COLOR_BGR2RGB )
if input_format == "RGB":
a__ = img[:, :, ::-1]
return img
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__=1 ) -> str:
'''simple docstring'''
return (images[i : i + batch] for i in range(0,len(UpperCAmelCase__ ),UpperCAmelCase__ ))
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
@require_torch
    def test_small_model_pt( self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}] , )
@unittest.skip("No models are available in TF" )
    def test_small_model_tf( self ):
pass
@slow
@require_torch
    def test_large_model_pt( self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5 , )
@unittest.skip("No models are available in TF" )
    def test_large_model_tf( self ):
pass
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __UpperCAmelCase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        audio_classifier = AudioClassificationPipeline(model=model , feature_extractor=processor )
        # test with a raw waveform
        audio = np.zeros((34000,) )
        audio2 = np.zeros((14000,) )
        return audio_classifier, [audio2, audio]
    def run_pipeline_test( self , audio_classifier , examples ):
        audio2 , audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output , [
                {"score": ANY(float ), "label": ANY(str )},
                {"score": ANY(float ), "label": ANY(str )},
            ] , )
        output = audio_classifier(audio , top_k=1 )
        self.assertEqual(
            output , [
                {"score": ANY(float ), "label": ANY(str )},
            ] , )
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio( self , audio_classifier ):
        import datasets
        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio )
        self.assertEqual(
            output , [
                {"score": ANY(float ), "label": ANY(str )},
                {"score": ANY(float ), "label": ANY(str )},
            ] , )
@require_torch
    def test_small_model_pt( self ):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification" , model=model )
        audio = np.ones((8000,) )
        output = audio_classifier(audio , top_k=4 )
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        audio_dict = {"array": np.ones((8000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4 )
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
    def test_large_model_pt( self ):
        import datasets
        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification" , model=model )
        dataset = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
        audio = np.array(dataset[3]["speech"] , dtype=np.float32 )
        output = audio_classifier(audio , top_k=4 )
        self.assertEqual(
            nested_simplify(output , decimals=3 ) , [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
    def test_large_model_tf( self ):
pass
'''simple docstring'''
import numpy as np
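# Power iteration: repeatedly apply the matrix to a vector and renormalize; the
# vector converges to the dominant eigenvector, and the Rayleigh quotient
# v* A v (for unit-norm v) converges to the largest-magnitude eigenvalue.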
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100, ):
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value , eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values , eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
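# Frequency/phase response plots: feed a unit impulse through the filter,
# zero-pad the output to one second of samples, take the FFT, and plot either
# magnitude in dB or unwrapped phase on a logarithmic frequency axis.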
class FilterType(Protocol ):
    """simple docstring"""
    def process( self , sample: float ) -> float:
        return 0.0
def get_bounds(fft_results , samplerate ):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response(filter_type , samplerate ):
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('Gain (dB)' )
    plt.plot(fft_db )
    plt.show()
def show_phase_response(filter_type , samplerate ):
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('Phase shift (Radians)' )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self) -> List[str]:
'''simple docstring'''
super().setUp()
a__: int = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
a__: Dict = dict(zip(lowercase , range(len(lowercase))))
a__: Union[str, Any] = Path(self.tmpdirname)
save_json(lowercase , save_dir / VOCAB_FILES_NAMES['vocab'])
save_json(lowercase , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase , save_dir / VOCAB_FILES_NAMES['source_spm'])
copyfile(lowercase , save_dir / VOCAB_FILES_NAMES['target_spm'])
a__: int = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer( self , **lowercase) -> MarianTokenizer:
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase)
    def get_input_output_texts( self , lowercase) -> str:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id( self) -> Any:
'''simple docstring'''
a__: int = '</s>'
a__: List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
    def test_get_vocab( self) -> Any:
'''simple docstring'''
a__: Any = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '</s>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '<pad>')
self.assertEqual(len(lowercase) , 9)
    def test_vocab_size( self) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9)
    def test_tokenizer_equivalence_en_de( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
a__: str = en_de_tokenizer(['I am a small frog'] , return_tensors=lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: str = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase , batch.input_ids[0])
a__: int = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase)
a__: Dict = [x.name for x in Path(lowercase).glob('*')]
self.assertIn('source.spm' , lowercase)
MarianTokenizer.from_pretrained(lowercase)
    def test_outputs_not_longer_than_maxlen( self) -> Optional[Any]:
'''simple docstring'''
a__: List[Any] = self.get_tokenizer()
a__: int = tok(
['I am a small frog' * 10_00, 'I am a small frog'] , padding=lowercase , truncation=lowercase , return_tensors=lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(batch.input_ids.shape , (2, 5_12))
    def test_outputs_can_be_shorter( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.get_tokenizer()
a__: List[str] = tok(['I am a tiny frog', 'I am a small frog'] , padding=lowercase , return_tensors=lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(batch_smaller.input_ids.shape , (2, 10))
@slow
    def test_tokenizer_integration( self) -> Optional[int]:
'''simple docstring'''
a__: Dict = {'input_ids': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
    def test_tokenizer_integration_seperate_vocabs( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')
a__: Optional[int] = 'Tämä on testi'
a__: Tuple = 'This is a test'
a__: Union[str, Any] = [76, 7, 20_47, 2]
a__: Dict = [69, 12, 11, 9_40, 2]
a__: str = tokenizer(lowercase).input_ids
self.assertListEqual(lowercase , lowercase)
a__: List[str] = tokenizer(text_target=lowercase).input_ids
self.assertListEqual(lowercase , lowercase)
a__: Tuple = tokenizer.decode(lowercase , skip_special_tokens=lowercase)
self.assertEqual(lowercase , lowercase)
"""simple docstring"""
from math import ceil
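# Model-parallel helpers: validate that a user-supplied device_map assigns every
# attention block to exactly one device, and build a default map by slicing the
# layer indices into contiguous, near-equal chunks per device.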
def assert_device_map(device_map , num_blocks ) ->int:
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks ) )
def get_device_map(n_layers , devices ) ->Union[str, Any]:
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowercase_ ( PretrainedConfig ):
    model_type = 'open-llama'
    def __init__( self , vocab_size=100_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention" , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
from cv2 import destroyAllWindows, imread, imshow, waitKey
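# The photographic negative replaces every pixel value p with 255 - p per channel,
# vectorized below as [255, 255, 255] - pixel.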
def convert_to_negative(img ) -> Tuple:
    '''simple docstring'''
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread('image_data/lena.jpg', 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
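Since imread returns a NumPy array, the per-pixel Python loop above can be replaced by a single vectorized expression; a minimal sketch:

import numpy as np

def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # invert all channels of all pixels at once
    return 255 - img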
| 670
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase__ ( __lowercase ):
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self , a , a , a , a , a , a ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = DistilBertModel(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , a )
_UpperCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , a , a , a , a , a , a ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , a , a , a , a , a , a ) -> Dict:
'''simple docstring'''
_UpperCamelCase = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , a , a , a , a , a , a ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , a , a , a , a , a , a ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , a , a , a , a , a , a ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
UpperCamelCase_ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ : Optional[int] = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : Any = True
UpperCamelCase_ : Optional[Any] = True
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = DistilBertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=a , dim=37 )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def A_ ( self ) -> Dict:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=a )
_UpperCamelCase = self._prepare_for_class(a , a )
_UpperCamelCase = torch.jit.trace(
a , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , """traced_model.pt""" ) )
_UpperCamelCase = torch.jit.load(os.path.join(a , """traced_model.pt""" ) , map_location=a )
loaded(inputs_dict["""input_ids"""].to(a ) , inputs_dict["""attention_mask"""].to(a ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCamelCase = model(a , attention_mask=a )[0]
_UpperCamelCase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , a )
_UpperCamelCase = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 709
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    """simple docstring"""
    if "model" in orig_key:
        orig_key = orig_key.replace("""model.""" , """""" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("""norm2""" , """output.LayerNorm""" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("""norm""" , """LayerNorm""" )
    if "transformer" in orig_key:
        layer_num = orig_key.split(""".""" )[0].split("""_""" )[-1]
        orig_key = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("""mha.attn""" , """attention.self""" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("""mha""" , """attention""" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("""W_q""" , """self.query""" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("""W_k""" , """self.key""" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("""W_v""" , """self.value""" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("""ff1""" , """intermediate.dense""" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("""ff2""" , """output.dense""" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("""ff""" , """output.dense""" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
    if "cls" not in orig_key:
        orig_key = """yoso.""" + orig_key
    return orig_key
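# Worked example (illustrative key): tracing one checkpoint name through rename_key:
#   "model.transformer_0.mha.W_q.weight"
#   -> strip "model."          : "transformer_0.mha.W_q.weight"
#   -> transformer_0 rewrite   : "encoder.layer.0.mha.W_q.weight"
#   -> "mha" -> "attention"    : "encoder.layer.0.attention.W_q.weight"
#   -> "W_q" -> "self.query"   : "encoder.layer.0.attention.self.query.weight"
#   -> no "cls" in key, prefix : "yoso.encoder.layer.0.attention.self.query.weight"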
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict["""cls.predictions.bias"""] = orig_state_dict["""cls.predictions.decoder.bias"""]
    orig_state_dict["""yoso.embeddings.position_ids"""] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    """simple docstring"""
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model_state_dict"""]
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
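A typical invocation of this conversion script would look roughly like the command below (the script file name and all paths are placeholders):

python convert_yoso_checkpoint.py \
    --pytorch_model_path /path/to/yoso_checkpoint.pt \
    --config_file /path/to/yoso_config.json \
    --pytorch_dump_path /path/to/output_dir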
| 202
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase :str = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , *lowercase , **lowercase ):
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
super().__init__(*lowercase , **lowercase )
| 667
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(""",""" ):
                    self.labels[label.lstrip().rstrip()] = int(key )
            self.labels = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
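    # Usage sketch (ids illustrative): with self.labels built from idalabel,
    #   pipe.get_label_ids(["golden retriever", "tabby"]) -> e.g. [207, 281]
    # (this helper is named get_label_ids in the upstream DiTPipeline); an
    # unknown label raises ValueError listing every valid key in self.labels.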
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == """mps"""
                if isinstance(timesteps , float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase )
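End to end, the pipeline is used roughly like this; a hedged sketch assuming the upstream DiTPipeline class and the facebook/DiT-XL-2-256 checkpoint:

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark"])       # ImageNet label -> class id
generator = torch.manual_seed(33)
image = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images[0]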
| 667
| 1
|
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( name , val , spaces=0 ):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , """:""" , val.size() )
    else:
        print(msg , """:""" , val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
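# Quick self-check of the version >= 2.0 branch on a tiny tensor (all sizes
# illustrative): rows are re-grouped so the num_splits axis becomes the
# slowest-varying one, while the overall shape is preserved.
#
# import torch
# num_heads, num_splits, hidden_size, cols = 2, 3, 4, 5
# param = torch.arange(num_heads * num_splits * hidden_size * cols, dtype=torch.float32).view(-1, cols)
# out = fix_query_key_value_ordering(param, checkpoint_version=2.0, num_splits=num_splits,
#                                    num_heads=num_heads, hidden_size=hidden_size)
# assert out.shape == param.shape  # same shape, rows permuted from head-major to split-major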
def convert_megatron_checkpoint( args , input_state_dict , config ):
# The converted output model.
SCREAMING_SNAKE_CASE : Optional[int] = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE : str = input_state_dict.get("""args""" , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE : List[str] = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE : Any = ds_args.hidden_size
SCREAMING_SNAKE_CASE : List[str] = ds_args.num_layers
SCREAMING_SNAKE_CASE : int = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE : Dict = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE : List[Any] = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE : Optional[int] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
SCREAMING_SNAKE_CASE : List[Any] = 0.0
# The model.
SCREAMING_SNAKE_CASE : List[str] = input_state_dict["""model"""]
# The language model.
SCREAMING_SNAKE_CASE : List[str] = model["""language_model"""]
# The embeddings.
SCREAMING_SNAKE_CASE : Any = lm["""embedding"""]
# The word embeddings.
SCREAMING_SNAKE_CASE : int = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE : int = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE : Optional[int] = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE : Dict = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE : Dict = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
SCREAMING_SNAKE_CASE : Union[str, Any] = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE : int = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
SCREAMING_SNAKE_CASE : List[Any] = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE : Any = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE : Any = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE : List[Any] = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE : int = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE : List[Any] = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
SCREAMING_SNAKE_CASE : List[Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
SCREAMING_SNAKE_CASE : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE : List[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
SCREAMING_SNAKE_CASE : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE : Dict = torch.tensor(-1e4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = masked_bias
SCREAMING_SNAKE_CASE : Any = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE : Optional[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE : Dict = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
SCREAMING_SNAKE_CASE : Optional[int] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE : int = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE : Union[str, Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE : Union[str, Any] = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE : Optional[int] = transformer["""final_layernorm.weight"""]
SCREAMING_SNAKE_CASE : Any = transformer["""final_layernorm.bias"""]
# For the LM head, transformers wants the output matrix tied to the word embeddings.
SCREAMING_SNAKE_CASE : int = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=_A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=_A , help="""An optional config json file describing the pre-trained model.""" , )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
# Extract the basename.
SCREAMING_SNAKE_CASE : Dict = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
SCREAMING_SNAKE_CASE : Tuple = torch.load(_A , map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
SCREAMING_SNAKE_CASE : Optional[Any] = input_state_dict.get("""args""" , _A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
SCREAMING_SNAKE_CASE : Any = """gelu_fast"""
elif ds_args.openai_gelu:
SCREAMING_SNAKE_CASE : Optional[Any] = """gelu_new"""
else:
SCREAMING_SNAKE_CASE : Tuple = """gelu"""
else:
# in the very early days this used to be "gelu_new"
SCREAMING_SNAKE_CASE : int = """gelu_new"""
# Spell out all parameters in case the defaults change.
SCREAMING_SNAKE_CASE : Dict = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=50256 , eos_token_id=50256 , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
SCREAMING_SNAKE_CASE : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
SCREAMING_SNAKE_CASE : Tuple = convert_megatron_checkpoint(_A , _A , _A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
SCREAMING_SNAKE_CASE : List[str] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
SCREAMING_SNAKE_CASE : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
SCREAMING_SNAKE_CASE : Optional[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = """gpt2"""
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = type(_A ).__name__
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_A )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_A )
# Store the state_dict to file.
SCREAMING_SNAKE_CASE : str = os.path.join(_A , """pytorch_model.bin""" )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_A , _A )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 711
|
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] =["""input_features""", """attention_mask"""]
def __init__( self : List[str] , UpperCAmelCase__ : Optional[Any]=8_0 , UpperCAmelCase__ : List[str]=1_6_0_0_0 , UpperCAmelCase__ : Optional[int]=8_0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , **UpperCAmelCase__ : Optional[int] , ) ->Optional[int]:
"""simple docstring"""
super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = num_mel_bins
SCREAMING_SNAKE_CASE : Dict = do_ceptral_normalize
SCREAMING_SNAKE_CASE : Optional[int] = normalize_means
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_vars
SCREAMING_SNAKE_CASE : Union[str, Any] = True
def _lowercase ( self : Tuple , UpperCAmelCase__ : np.ndarray , ) ->np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(UpperCAmelCase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = ta_kaldi.fbank(UpperCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _lowercase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[bool] = True , UpperCAmelCase__ : Optional[bool] = True , UpperCAmelCase__ : float = 0.0 , ) ->np.ndarray:
"""simple docstring"""
if normalize_means:
SCREAMING_SNAKE_CASE : Tuple = x[:input_length].mean(axis=0 )
SCREAMING_SNAKE_CASE : List[Any] = np.subtract(UpperCAmelCase__ , UpperCAmelCase__ )
if normalize_vars:
SCREAMING_SNAKE_CASE : Optional[Any] = x[:input_length].std(axis=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.divide(UpperCAmelCase__ , UpperCAmelCase__ )
if input_length < x.shape[0]:
SCREAMING_SNAKE_CASE : Tuple = padding_value
# make sure array is in float32
SCREAMING_SNAKE_CASE : Optional[Any] = x.astype(np.floataa )
return x
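    # Numeric sketch (illustrative): for x = [[1., 2.], [3., 4.], [0., 0.]] and
    # input_length = 2, the valid frames have per-feature mean [2., 3.] and
    # std [1., 1.], so the normalized rows are [[-1., -1.], [1., 1.]]; every
    # row at index >= input_length is then overwritten with padding_value.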
def _lowercase ( self : List[str] , UpperCAmelCase__ : List[np.ndarray] , UpperCAmelCase__ : Optional[np.ndarray] = None ) ->List[np.ndarray]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase__ , UpperCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCAmelCase__ , UpperCAmelCase__ )
]
def __call__( self : str , UpperCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
SCREAMING_SNAKE_CASE : Tuple = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : Optional[Any] = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Dict = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Optional[Any] = [raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE : Tuple = [self._extract_fbank_features(UpperCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({"""input_features""": features} )
SCREAMING_SNAKE_CASE : str = self.pad(
UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
# make sure list is in array format
SCREAMING_SNAKE_CASE : Dict = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_features]
SCREAMING_SNAKE_CASE : Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
SCREAMING_SNAKE_CASE : int = [np.asarray(UpperCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
SCREAMING_SNAKE_CASE : Tuple = (
np.array(UpperCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE : str = self.normalize(
padded_inputs["""input_features"""] , attention_mask=UpperCAmelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Any = padded_inputs.convert_to_tensors(UpperCAmelCase__ )
return padded_inputs
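Usage sketch, assuming this corresponds to the upstream Speech2TextFeatureExtractor (class name and defaults are an assumption; requires torchaudio for the Kaldi fbank features):

import numpy as np
from transformers import Speech2TextFeatureExtractor  # assumed upstream class

fe = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16_000, num_mel_bins=80)
waveform = np.random.randn(16_000).astype(np.float32)  # placeholder 1 s mono waveform
inputs = fe(waveform, sampling_rate=16_000, padding=True, return_tensors="pt")
print(inputs["input_features"].shape)  # (1, n_frames, 80)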
| 446
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any ,lowercase__ : Tuple ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 3_2 ,lowercase__ : bool = True ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,lowercase__ : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,lowercase__ : bool = True ,lowercase__ : Any=7 ,lowercase__ : Optional[int]=3_0 ,lowercase__ : Tuple=4_0_0 ,lowercase__ : List[Any]=3 ,):
__lowercase = parent
__lowercase = do_resize
__lowercase = size if size is not None else {'''shortest_edge''': 2_8_8}
__lowercase = size_divisor
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = do_center_crop
__lowercase = image_mean
__lowercase = image_std
__lowercase = do_pad
__lowercase = batch_size
__lowercase = num_channels
__lowercase = min_resolution
__lowercase = max_resolution
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int]=False ):
if not batched:
__lowercase = self.size['''shortest_edge''']
__lowercase = image_inputs[0]
if isinstance(lowercase__ ,Image.Image ):
__lowercase , __lowercase = image.size
else:
__lowercase , __lowercase = image.shape[1], image.shape[2]
__lowercase = size / min(lowercase__ ,lowercase__ )
if h < w:
__lowercase , __lowercase = size, scale * w
else:
__lowercase , __lowercase = scale * h, size
__lowercase = int((1_3_3_3 / 8_0_0) * size )
if max(lowercase__ ,lowercase__ ) > max_size:
__lowercase = max_size / max(lowercase__ ,lowercase__ )
__lowercase = newh * scale
__lowercase = neww * scale
__lowercase , __lowercase = int(newh + 0.5 ), int(neww + 0.5 )
__lowercase , __lowercase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__lowercase = []
for image in image_inputs:
__lowercase , __lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowercase = max(lowercase__ ,key=lambda lowercase__ : item[0] )[0]
__lowercase = max(lowercase__ ,key=lambda lowercase__ : item[1] )[1]
return expected_height, expected_width
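# Worked numbers for the single-image branch above (illustrative): with
# shortest_edge = 288, size_divisor = 32 and a 400 x 600 (h x w) input:
#   scale = 288 / 400 = 0.72           -> newh, neww = 288, 432
#   max_size = int(1333 / 800 * 288) = 479; max(288, 432) <= 479, no rescale
#   floor both sides to multiples of 32 -> expected size 288 x 416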
@require_torch
@require_vision
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = BridgeTowerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = BridgeTowerImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase__ ,'''image_std''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size_divisor''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# Initialize image processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ ,batched=lowercase__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def SCREAMING_SNAKE_CASE ( self : Dict ):
# Initialize image processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ ,batched=lowercase__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
# Initialize image processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ ,batched=lowercase__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
| 41
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self ,a=0 ,b=0 ,train_len=6_4 ,eval_len=6_4 ,callbacks=None ,disable_tqdm=False ,**kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a ,b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir ,disable_tqdm=disable_tqdm ,report_to=[] ,**kwargs )
        return Trainer(
            model ,args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,callbacks=callbacks ,)
    def check_callbacks_equality( self ,cbs1 ,cbs2 ):
        self.assertEqual(len(cbs1 ) ,len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 ,cbs2 ):
            if isinstance(cb1 ,type ) and isinstance(cb2 ,type ):
                self.assertEqual(cb1 ,cb2 )
            elif isinstance(cb1 ,type ) and not isinstance(cb2 ,type ):
                self.assertEqual(cb1 ,cb2.__class__ )
            elif not isinstance(cb1 ,type ) and isinstance(cb2 ,type ):
                self.assertEqual(cb1.__class__ ,cb2 )
            else:
                self.assertEqual(cb1 ,cb2 )
    def get_expected_events( self ,trainer ):
        expected_events = ['''on_init_end''', '''on_train_begin''']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('''on_epoch_begin''' )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='''ignore''' ,category=UserWarning )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
| 41
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """Speech2TextFeatureExtractor"""
__snake_case = """Speech2TextTokenizer"""
def __init__( self: int , a: Tuple , a: Optional[Any] ):
super().__init__(a , a )
__lowerCamelCase : List[str] = self.feature_extractor
__lowerCamelCase : Optional[Any] = False
def __call__( self: Optional[int] , *a: Optional[Any] , **a: str ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a , **a )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__lowerCamelCase : Any = kwargs.pop('raw_speech' )
else:
__lowerCamelCase : List[Any] = kwargs.pop('audio' , a )
__lowerCamelCase : List[str] = kwargs.pop('sampling_rate' , a )
__lowerCamelCase : Any = kwargs.pop('text' , a )
if len(a ) > 0:
__lowerCamelCase : List[Any] = args[0]
__lowerCamelCase : List[str] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__lowerCamelCase : Tuple = self.feature_extractor(a , *a , sampling_rate=a , **a )
if text is not None:
__lowerCamelCase : Dict = self.tokenizer(a , **a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowerCamelCase : Dict = encodings['input_ids']
return inputs
def _snake_case ( self: Union[str, Any] , *a: str , **a: List[Any] ):
return self.tokenizer.batch_decode(*a , **a )
def _snake_case ( self: Any , *a: str , **a: List[Any] ):
return self.tokenizer.decode(*a , **a )
@contextmanager
def _snake_case ( self: Tuple ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call).' )
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Union[str, Any] = self.tokenizer
yield
__lowerCamelCase : Dict = self.feature_extractor
__lowerCamelCase : Tuple = False
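Typical usage passes audio and text in a single call instead of the deprecated as_target_processor context manager; a hedged sketch assuming the upstream Speech2TextProcessor and the facebook/s2t-small-librispeech-asr checkpoint:

import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.random.randn(16_000).astype(np.float32)  # placeholder 1 s waveform

inputs = processor(audio=audio, sampling_rate=16_000, text="hello world", return_tensors="pt")
# inputs now holds input_features / attention_mask plus labels (= the tokenized text's input_ids)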
| 706
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
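# --- Added usage sketch (not part of the original module). With the lazy structure above,
# `transformers.models.opt` is replaced by a `_LazyModule`: the heavy PyTorch/TF/Flax
# modeling files are only imported on first attribute access, so importing the package
# stays cheap:
#
#   from transformers.models import opt
#   print(type(opt).__name__)   # _LazyModule
#   config = opt.OPTConfig()    # first access triggers the real import of configuration_opt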
| 230
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
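# --- Added usage sketch (not part of the original tests). How the pipeline under test is
# assembled by hand, mirroring the integration test above; downloading the checkpoint
# requires network access:
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]
#   print(image.shape)  # (32, 32, 3)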
| 474
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
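# --- Added usage sketch (not part of the original tests): the minimal benchmark
# invocation exercised by the tests above, runnable on its own (downloads a tiny model).
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = PyTorchBenchmark(args).run()
    print(results.time_inference_result)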
| 251
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
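# --- Added usage sketch (not part of the original module). How this template is aligned
# with a dataset's features; the column names are illustrative assumptions:
#
#   features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   task = AutomaticSpeechRecognition(audio_column="audio", transcription_column="transcription")
#   aligned = task.align_with_features(features)
#   print(aligned.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}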
| 713
|
from __future__ import annotations
import numpy as np
def relu(vector):
    """Applies the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)
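# --- Added sketch (not part of the original snippet): the subgradient used in
# backpropagation, plus a leaky variant; `alpha` is an assumed illustrative constant.
def relu_derivative(vector):
    # 1 where the input is positive, 0 elsewhere (subgradient at 0 chosen as 0)
    return (np.asarray(vector) > 0).astype(float)


def leaky_relu(vector, alpha: float = 0.01):
    arr = np.asarray(vector, dtype=float)
    return np.where(arr > 0, arr, alpha * arr)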
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 249
| 0
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)
            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make the random mask reproducible across PT and TF
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_inputs, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
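# --- Added sketch (not part of the original tests). Why the tester computes seq_length
# the way it does: ViTMAE randomly masks a fraction `mask_ratio` of the patch tokens, so
# the encoder only sees the kept tokens plus [CLS]. With the tester's defaults:
#
#   image_size, patch_size, mask_ratio = 30, 2, 0.6
#   num_patches = (image_size // patch_size) ** 2                  # 225 patches
#   seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226) = 91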
| 647
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
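# --- Added usage sketch (not part of the original script). An illustrative invocation;
# every path below is a hypothetical placeholder:
#
#   python convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model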
| 237
| 0
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : str = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
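# --- Added migration sketch (not part of the original module). New code should use the
# replacement class directly; the constructor arguments are illustrative (defaults shown):
#
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor()  # preferred over the deprecated YolosFeatureExtractor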
| 433
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
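# --- Added usage sketch (not part of the original module). Zero-shot detection inputs as
# this processor expects them; downloading the checkpoint requires network access, and
# `image` must be a PIL image supplied by the caller:
#
#   from PIL import Image
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.open("cat.png")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   print(inputs["input_ids"].shape, inputs["pixel_values"].shape)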
| 433
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
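# --- Added usage sketch (not part of the original script). Illustrative invocations;
# every path below is a hypothetical placeholder:
#
#   # pretraining checkpoint:
#   python convert_unispeech_original_fairseq_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech.pt --pytorch_dump_folder_path ./hf_model --not_finetuned
#
#   # fine-tuned CTC checkpoint (also needs the fairseq dictionary):
#   python convert_unispeech_original_fairseq_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech_ctc.pt --dict_path ./dict.json \
#       --pytorch_dump_folder_path ./hf_model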
| 649
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
'7B': 1_1_0_0_8,
'13B': 1_3_8_2_4,
'30B': 1_7_9_2_0,
'65B': 2_2_0_1_6,
'70B': 2_8_6_7_2,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def __a ( A__ , A__ , A__ , A__=True ) -> Union[str, Any]:
os.makedirs(A__ , exist_ok=A__ )
lowerCAmelCase = os.path.join(A__ , "tmp" )
os.makedirs(A__ , exist_ok=A__ )
lowerCAmelCase = read_json(os.path.join(A__ , "params.json" ) )
lowerCAmelCase = NUM_SHARDS[model_size]
lowerCAmelCase = params["n_layers"]
lowerCAmelCase = params["n_heads"]
lowerCAmelCase = n_heads // num_shards
lowerCAmelCase = params["dim"]
lowerCAmelCase = dim // n_heads
lowerCAmelCase = 10_000.0
lowerCAmelCase = 1.0 / (base ** (torch.arange(0 , A__ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCAmelCase = params["n_kv_heads"] # for GQA / MQA
lowerCAmelCase = n_heads_per_shard // num_key_value_heads
lowerCAmelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCAmelCase = n_heads
lowerCAmelCase = n_heads_per_shard
lowerCAmelCase = dim
# permute for sliced rotary
def permute(A__ , A__=n_heads , A__=dim , A__=dim ):
return w.view(A__ , dima // n_heads // 2 , 2 , A__ ).transpose(1 , 2 ).reshape(A__ , A__ )
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCAmelCase = torch.load(os.path.join(A__ , "consolidated.00.pth" ) , map_location="cpu" )
else:
# Sharded
lowerCAmelCase = [
torch.load(os.path.join(A__ , f"consolidated.{i:02d}.pth" ) , map_location="cpu" )
for i in range(A__ )
]
lowerCAmelCase = 0
lowerCAmelCase = {"weight_map": {}}
for layer_i in range(A__ ):
lowerCAmelCase = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
lowerCAmelCase = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            # (num_shards, dim, dims_per_head, n_heads_per_shard, num_key_value_heads, num_local_key_value_heads,
            # key_value_dim, index_dict and tmp_model_path are all computed earlier in write_model.)
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))

    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
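
# Why the sharded branch above clones attention_norm/ffn_norm before saving -- a
# minimal illustration (not from the original script): sliced/viewed tensors share
# one storage, and torch.save serializes the whole underlying storage.
#
#   t = torch.arange(6)
#   view = t[:2]
#   assert view.data_ptr() == t.data_ptr()    # view shares t's storage
#   copy = t[:2].clone()
#   assert copy.data_ptr() != t.data_ptr()    # clone owns its storage, safe to save alone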
| 649
| 1
|
def solution() -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet (a < b < c)
    for which a + b + c == 1000 (Project Euler problem 9).
    """
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 559
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    '''Add quant_trainer arguments to an argparse parser.'''
    group = parser.add_argument_group('quant_trainer arguments')
    group.add_argument('--wprec', type=int, default=8, help='weight precision')
    group.add_argument('--aprec', type=int, default=8, help='activation precision')
    group.add_argument('--quant-per-tensor', action='store_true', help='per tensor weight scaling')
    group.add_argument('--quant-disable', action='store_true', help='disable all quantizers')
    group.add_argument('--quant-disable-embeddings', action='store_true', help='disable all embeddings quantizers')
    group.add_argument('--quant-disable-keyword', type=str, nargs='+', help='disable quantizers by keyword')
    group.add_argument('--quant-disable-layer-module', type=str, help='disable quantizers by keyword under layer.')
    group.add_argument('--quant-enable-layer-module', type=str, help='enable quantizers by keyword under layer')
    group.add_argument('--calibrator', default='max', help='which quantization range calibrator to use')
    group.add_argument('--percentile', default=None, type=float, help='percentile for PercentileCalibrator')
    group.add_argument('--fuse-qkv', action='store_true', help='use the same scale factor for qkv')
    group.add_argument('--clip-gelu', metavar='N', type=float, help='clip gelu output maximum value to N')
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def set_default_quantizers(args):
    '''Set default quantizers before creating the model.'''
    if args.calibrator == 'max':
        calib_method = 'max'
    elif args.calibrator == 'percentile':
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator')
        calib_method = 'histogram'
    elif args.calibrator == 'mse':
        calib_method = 'histogram'
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}')

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    '''Configure a model for quantization before training or evaluation.'''
    logger.info('Configuring Model for Quantization')
    logger.info(f'using quantization package {pytorch_quantization.__file__}')
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ['embeddings'], which='weight', _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [''], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r'layer.\d+.' + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r'layer.\d+.' + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    '''Enable calibration of all "*_quantizer" modules in the model.'''
    logger.info('Enabling Calibration')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'{name:80}: {module}')
def finish_calibration(model, args):
    '''Disable calibration and load amax for all "*_quantizer" modules in the model.'''
    logger.info('Loading calibrated amax')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('percentile', percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    '''Use the same scale factor for the query, key and value quantizers by taking the max of the three.'''

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, '_amax'):
                print('          WARNING: NO AMAX BUFFER')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f'          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}')

    for name, mod in model.named_modules():
        if name.endswith('.attention.self'):
            logger.info(f'FUSE_QKV: {name:{name_width}}')
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    '''Clip activations generated by GELU to maxval when quantized.'''
    for name, mod in model.named_modules():
        if name.endswith('.output.dense') and not name.endswith('attention.output.dense'):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}')
def expand_amax(model):
    '''Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax.'''
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer') and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}')
def recalibrate_weights(model):
    '''Performs max calibration on the weights and updates amax.'''
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer'):
            if not hasattr(mod._weight_quantizer, '_amax'):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER')
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}')
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    '''Print the per-layer quantization configuration of the model.'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, 'weight'):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, '_input_quantizer', None)
        weight_q = getattr(mod, '_weight_quantizer', None)
        if not hasattr(mod, 'weight'):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        s = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f'{name:{name_width}} {act_str}')
            logger.info(f'{" ":{name_width}} {wgt_str}')
def print_quant_summary(model):
    '''Print a summary of all TensorQuantizer modules in the model.'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f'{name:80} {mod}')
            count += 1
    print(f'{count} TensorQuantizers found in model')
def set_quantizer(name, mod, quantizer, k, v):
    '''Set an attribute on mod's named quantizer submodule.'''
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f'{name} has no {quantizer}')
def a_ ( __snake_case , __snake_case , __snake_case="both" , **__snake_case ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase_ = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(__snake_case , __snake_case , '_input_quantizer' , __snake_case , __snake_case )
if which in ["weight", "both"]:
set_quantizer(__snake_case , __snake_case , '_weight_quantizer' , __snake_case , __snake_case )
logger.info(__snake_case )
def set_quantizer_by_name(model, names, **kwargs):
    '''Set quantizer attributes for layers whose name matches a pattern in names.'''
    for name, mod in model.named_modules():
        if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('_quantizer'):
            for n in names:
                if re.search(n, name):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod, k, v)
                    logger.info(s)
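
# Illustrative end-to-end calibration flow for the helpers above (sketch only; the
# model factory and calibration batches are assumptions, not part of this file):
#
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(['--calibrator', 'max'])
#   set_default_quantizers(args)              # must run before the model is built
#   model = build_qdqbert_model()             # hypothetical model factory
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   # ...run a few forward passes over calibration batches here...
#   finish_calibration(model, args)           # freezes amax values, re-enables quant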
| 559
| 1
|
"""simple docstring"""
from __future__ import annotations
class Node:
    """A simple binary-tree node holding a value and two children."""

    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """In-order traversal: left subtree, node value, right subtree."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Depth of the tree: 1 plus the deeper of the two subtrees."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """True if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
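
# Quick sanity checks for the helpers above (illustrative; a node with a single
# child is not a full binary tree, and a two-level chain has depth 2):
#
#   tiny = Node(1)
#   tiny.left = Node(2)
#   assert not is_full_binary_tree(tiny)
#   assert depth_of_tree(tiny) == 2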
| 19
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = requests.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
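
# Illustrative use of two helpers above (values invented): url_to_filename maps a
# URL plus optional ETag to a stable cache key, and chunk batches a list lazily.
#
#   key = url_to_filename("https://cdn.huggingface.co/demo.bin", etag='"abc123"')
#   for batch in chunk(["a.jpg", "b.jpg", "c.jpg"], batch=2):
#       print(batch)      # ['a.jpg', 'b.jpg'] then ['c.jpg']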
| 63
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when none is passed in, so that
        # test_switch can exercise a scheduler converted via from_config
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
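
# Minimal stand-alone use of the scheduler under test (a sketch under the assumed
# diffusers API; the random tensors stand in for a real denoising model):
#
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.randn(1, 3, 8, 8)   # stand-in for model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample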
| 717
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list; returns None when there are too few tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity between the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
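
# Minimal end-to-end sketch of the deduplication API above (toy records invented;
# note the snippets fall below MIN_NUM_TOKENS, so this mainly exercises the plumbing):
if __name__ == "__main__":
    toy = Dataset.from_dict(
        {
            "content": ["def f(): return 1", "def f(): return 1", "def g(): return 2"],
            "repo_name": ["r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(filtered), len(clusters))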
| 321
| 0
|
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
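
# Illustrative non-interactive use of quick_sort_random (values invented):
#
#   data = [9, 1, 5, 5, 0, -3]
#   quick_sort_random(data, 0, len(data))
#   assert data == sorted(data)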
| 305
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 305
| 1
|
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate the inductive reactance, the frequency or the inductance of a coil,
    given the other two quantities; pass 0 for the unknown one.
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
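
# Worked examples for ind_reactance (illustrative; X_L = 2*pi*f*L):
#
#   ind_reactance(35e-3, 1e3, 0)     # -> {'reactance': 219.91...}
#   ind_reactance(0, 1e3, 219.91)    # -> {'inductance': ~0.035}
#   ind_reactance(35e-3, 0, 219.91)  # -> {'frequency': ~1000.0}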
| 675
|
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-means clustering of `vectors` into `noofclusters` clusters.

    Requires TensorFlow 1.x graph mode (or ``import tensorflow.compat.v1 as tf``).
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        # tf.sub was removed in TF 1.0; tf.subtract is the replacement.
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        # tf.initialize_all_variables() is deprecated; use the modern name.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
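A minimal driver sketch for the clustering routine above; it assumes a TensorFlow 1.x environment (or `import tensorflow.compat.v1 as tf` with `tf.disable_v2_behavior()`), and the sample data is purely illustrative:

if __name__ == "__main__":
    # Two well-separated blobs; expect one centroid near each.
    sample_vectors = array(
        [[1.0, 1.0], [1.2, 0.8], [0.9, 1.1], [8.0, 8.0], [7.8, 8.2], [8.1, 7.9]]
    )
    centroids, assignments = TFKMeansCluster(sample_vectors, 2)
    print(centroids)
    print(assignments)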
| 675
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a LeViT model."""

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
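For reference, a short sketch of instantiating the configuration above and inspecting the derived `down_ops`; passing the config directly to the ONNX config is an assumption about the `OnnxConfig` constructor:

if __name__ == "__main__":
    config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384], key_dim=[16, 16, 16])
    print(config.model_type)  # "levit"
    print(config.down_ops)    # two "Subsample" stages derived from key_dim / hidden_sizes
    onnx_config = LevitOnnxConfig(config)
    print(dict(onnx_config.inputs))  # {"pixel_values": {0: "batch", ...}}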
| 11
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
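The net effect of the `_LazyModule` registration above is that heavy submodules are only imported on first attribute access. A small illustration, assuming `transformers` is installed:

import transformers.models.vision_text_dual_encoder as vtde
# No torch/flax/tf modeling files have been imported yet; this first
# attribute access resolves the name lazily through _LazyModule:
print(vtde.VisionTextDualEncoderConfig)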
| 284
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize to a multiple of 32 and rescale a PIL image to a [-1, 1] tensor."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""Latent-diffusion pipeline for image super-resolution using a VQ-VAE and a UNet."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
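A usage sketch for the pipeline above; the checkpoint name follows the diffusers documentation for this pipeline but should be treated as an assumption, and the input file is hypothetical:

from PIL import Image

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))  # any local RGB image
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")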
| 593
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits (F1 = F2 = 1)."""
    fa, fb = 1, 1
    index = 2
    while True:
        digits = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            digits += 1
        if digits == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
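Two quick sanity checks for the solver above (the 1000-digit answer is the well-known Project Euler 25 result):

assert solution(2) == 7    # F7 = 13 is the first two-digit term
assert solution(3) == 12   # F12 = 144 is the first three-digit term
print(solution())          # 4782 for the default of 1000 digits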
| 593
| 1
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size,n_special=self.n_special,emb_dim=self.hidden_size,n_layers=self.num_hidden_layers,n_heads=self.num_attention_heads,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,gelu_activation=self.gelu_activation,sinusoidal_embeddings=self.sinusoidal_embeddings,asm=self.asm,causal=self.causal,n_langs=self.n_langs,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,summary_type=self.summary_type,use_proj=self.use_proj,num_labels=self.num_labels,bos_token_id=self.bos_token_id,)
def lowerCamelCase_ ( self : Union[str, Any],__A : Optional[int],__A : Optional[int],__A : int,__A : List[Any],__A : List[str],__A : Optional[Any],__A : Optional[int],__A : Union[str, Any],__A : List[Any],):
_lowerCamelCase : List[Any] = XLMModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : int = model(__A,lengths=__A,langs=__A )
_lowerCamelCase : str = model(__A,langs=__A )
_lowerCamelCase : str = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Dict,__A : Optional[int],__A : int,__A : Optional[int],__A : Union[str, Any],__A : Union[str, Any],__A : int,__A : Optional[int],__A : int,__A : List[str],):
_lowerCamelCase : List[str] = XLMWithLMHeadModel(__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[Any] = model(__A,token_type_ids=__A,labels=__A )
self.parent.assertEqual(result.loss.shape,() )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Optional[int],__A : int,__A : List[Any],__A : Optional[int],__A : Union[str, Any],__A : str,__A : Tuple,__A : List[Any],__A : str,__A : List[Any],):
_lowerCamelCase : List[str] = XLMForQuestionAnsweringSimple(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = model(__A )
_lowerCamelCase : str = model(__A,start_positions=__A,end_positions=__A )
_lowerCamelCase : Optional[int] = outputs
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Union[str, Any],__A : Optional[Any],__A : Optional[int],__A : Optional[Any],__A : Dict,__A : int,__A : Union[str, Any],__A : List[Any],):
_lowerCamelCase : List[str] = XLMForQuestionAnswering(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = model(__A )
_lowerCamelCase : Dict = model(
__A,start_positions=__A,end_positions=__A,cls_index=__A,is_impossible=__A,p_mask=__A,)
_lowerCamelCase : str = model(
__A,start_positions=__A,end_positions=__A,cls_index=__A,is_impossible=__A,)
        (total_loss,) = result_with_labels.to_tuple()
_lowerCamelCase : int = model(__A,start_positions=__A,end_positions=__A )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape,() )
self.parent.assertEqual(result.start_top_log_probs.shape,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape,(self.batch_size,) )
def lowerCamelCase_ ( self : List[Any],__A : Optional[int],__A : str,__A : int,__A : Any,__A : str,__A : str,__A : Tuple,__A : int,__A : List[str],):
_lowerCamelCase : Union[str, Any] = XLMForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Any = model(__A,labels=__A )
self.parent.assertEqual(result.loss.shape,() )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : List[Any],__A : Dict,__A : Union[str, Any],__A : List[str],__A : List[str],__A : Optional[int],__A : Optional[Any],__A : List[str],__A : str,__A : Optional[int],):
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : List[Any] = XLMForTokenClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,attention_mask=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Optional[Any],__A : Dict,__A : Dict,__A : Union[str, Any],__A : Any,__A : int,__A : Tuple,__A : Any,__A : Union[str, Any],__A : List[Any],):
_lowerCamelCase : Optional[int] = self.num_choices
_lowerCamelCase : List[Any] = XLMForMultipleChoice(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : str = model(
__A,attention_mask=__A,token_type_ids=__A,labels=__A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def lowerCamelCase_ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__A )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__A )
def lowerCamelCase_ ( self : str,__A : Optional[Any],__A : str,__A : List[str],__A : Tuple,__A : Optional[Any],__A : Optional[int]=False,__A : Optional[int]=1 ):
self.assertIsInstance(__A,__A )
self.assertListEqual(
[isinstance(__A,__A ) for iter_attentions in attentions],[True] * len(__A ) )
self.assertEqual(len(__A ),(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__A ):
# adds PAD dummy token
_lowerCamelCase : Optional[Any] = min_length + idx + 1
_lowerCamelCase : int = min_length + idx + 1
_lowerCamelCase : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions],[expected_shape] * len(__A ) )
def lowerCamelCase_ ( self : Optional[Any],__A : Union[str, Any],__A : Dict,__A : List[Any],__A : Dict,__A : List[str],__A : Union[str, Any]=False,__A : Dict=1 ):
self.assertIsInstance(__A,__A )
self.assertListEqual(
[isinstance(__A,__A ) for iter_hidden_states in hidden_states],[True] * len(__A ),)
self.assertEqual(len(__A ),(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__A ):
# adds PAD dummy token
_lowerCamelCase : int = min_length + idx + 1
_lowerCamelCase : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],[expected_shape] * len(__A ),)
pass
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = XLMModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
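A sketch of the tiny configuration the tester above constructs, useful for ad-hoc experiments (assumes `transformers` is installed):

from transformers import XLMConfig
tiny_config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4, n_langs=2)
print(tiny_config.emb_dim)  # 32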
| 44
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""Cross-attention 2D downsampling block: resnet/attention pairs plus optional downsample."""
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsampling block: resnets only, with optional downsample."""
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""Cross-attention 2D upsampling block with skip connections from the down path."""
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block with skip connections: resnets only, optional upsample."""
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Middle UNet block: an initial resnet followed by attention/resnet pairs."""
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
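The up blocks above merge skip activations by channel concatenation (channels-last in Flax). A minimal standalone sketch of that step, with illustrative shapes:

import jax.numpy as jnp

hidden_states = jnp.ones((1, 8, 8, 320))      # (batch, height, width, channels)
res_hidden_states = jnp.ones((1, 8, 8, 320))  # matching skip activation from the down path
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(merged.shape)  # (1, 8, 8, 640) — resnet_in_channels + res_skip_channels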
| 529
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
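As the `# New Code #` comment notes, the manual `samples_seen` bookkeeping above can be replaced by `Accelerator.gather_for_metrics`, which drops the duplicated end-of-epoch samples itself. A sketch of the eval-loop body under that alternative:

# inside the eval loop, replacing the manual samples_seen bookkeeping:
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(predictions=predictions, references=references)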
| 316
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as <s> A </s> for single sequences or <s> A </s></s> B </s> for pairs."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """CamemBERT does not use token type ids; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
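A short usage sketch for the fast tokenizer above (requires the `camembert-base` checkpoint; the expected special-token ids, <s>=5 and </s>=6 from the published vocabulary, are an assumption):

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
encoded = tokenizer("Le camembert est délicieux !")
print(encoded["input_ids"])  # starts with <s> (5) and ends with </s> (6)
pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])
print(pair)                  # [5, 10, 11, 6, 6, 12, 13, 6]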
| 316
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> List[Any]:
SCREAMING_SNAKE_CASE = XLMModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , lengths=a , langs=a)
SCREAMING_SNAKE_CASE = model(a , langs=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = XLMWithLMHeadModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = XLMForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(a , start_positions=a , end_positions=a)
SCREAMING_SNAKE_CASE = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Tuple:
SCREAMING_SNAKE_CASE = XLMForQuestionAnswering(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
        (total_loss,) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE = model(a , start_positions=a , end_positions=a)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Dict:
SCREAMING_SNAKE_CASE = XLMForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = XLMForTokenClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = XLMForMultipleChoice(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*a)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 73
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 378
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Returns all string rotations of `s`, one per character."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
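# e.g. all_rotations("^BANANA") returns (order before sorting):
# ['^BANANA', 'BANANA^', 'ANANA^B', 'NANA^BA', 'ANA^BAN', 'NA^BANA', 'A^BANAN']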
def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows-Wheeler transform of `s` plus the index of the original string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
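# e.g. bwt_transform("^BANANA") -> {"bwt_string": "BNN^AAA", "idx_original_string": 6}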
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Rebuilds the original string from its BWT string and original-row index."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # prepend the BWT column to every partial rotation, then re-sort
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
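# Round trip of the example above: reverse_bwt("BNN^AAA", 6) -> "^BANANA"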
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] ="Provide a string that I will generate its BWT transform: "
SCREAMING_SNAKE_CASE__ : List[Any] =input(entry_msg).strip()
SCREAMING_SNAKE_CASE__ : Any =bwt_transform(s)
print(
F"""Burrows Wheeler transform for string \'{s}\' results """
F"""in \'{result['bwt_string']}\'"""
)
SCREAMING_SNAKE_CASE__ : Dict =reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F"""Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' """
F"""we get original string \'{original_string}\'"""
)
| 721
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens (deobfuscated name assumed from diffusers' spectrogram-diffusion pipeline)."""
    @register_to_config
    def __init__(self, max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff, feed_forward_proj, is_decoder=False):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask into an additive bias for the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
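# A minimal sketch (assumed shapes; not the mixin's exact implementation) of what
# get_extended_attention_mask does with a 2D padding mask: broadcast it to
# (batch, 1, 1, seq_len) and turn masked positions into a large negative additive bias.
def extended_attention_mask_sketch(mask, dtype=torch.float32):
    extended = mask[:, None, None, :].to(dtype)  # 1 -> attend, 0 -> masked
    return (1.0 - extended) * torch.finfo(dtype).min

# extended_attention_mask_sketch(torch.tensor([[1, 1, 0]]))[0, 0, 0] -> [0., 0., ~-3.4e38]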
| 558
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs, [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
lowerCAmelCase__ = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
snake_case__ , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
lowerCAmelCase__ = text_generator("""This is a test""" , do_sample=snake_case__ , num_return_sequences=2 , return_tensors=snake_case__ )
self.assertEqual(
snake_case__ , [
{"""generated_token_ids""": ANY(snake_case__ )},
{"""generated_token_ids""": ANY(snake_case__ )},
] , )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
        self.assertEqual(
            outputs, [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ], )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs, [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
lowerCAmelCase__ = text_generator(["""This is a test""", """This is a second test"""] , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], )
        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
lowerCAmelCase__ = text_generator("""This is a test""" )
self.assertEqual(snake_case__ , [{"""generated_text""": ANY(snake_case__ )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
lowerCAmelCase__ = text_generator("""This is a test""" , return_full_text=snake_case__ )
self.assertEqual(snake_case__ , [{"""generated_text""": ANY(snake_case__ )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
lowerCAmelCase__ = pipeline(task="""text-generation""" , model=snake_case__ , tokenizer=snake_case__ , return_full_text=snake_case__ )
lowerCAmelCase__ = text_generator("""This is a test""" )
self.assertEqual(snake_case__ , [{"""generated_text""": ANY(snake_case__ )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
lowerCAmelCase__ = text_generator("""This is a test""" , return_full_text=snake_case__ )
self.assertEqual(snake_case__ , [{"""generated_text""": ANY(snake_case__ )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
lowerCAmelCase__ = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}],
[{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}],
[{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}],
] , )
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ = text_generator("""""" )
self.assertEqual(snake_case__ , [{"""generated_text""": ANY(snake_case__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
lowerCAmelCase__ = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
            # Hole strategy cannot work when the requested new tokens exceed the model's maximum length
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500, handle_long_generation="hole", max_new_tokens=tokenizer.model_max_length + 10, )
@require_torch
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : str ):
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16}, )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out, [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCAmelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ = pipe("""This is a test""" )
self.assertEqual(
snake_case__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ = pipe("""This is a test""" )
self.assertEqual(
snake_case__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
import torch
lowerCAmelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
import torch
lowerCAmelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=snake_case__ , top_p=0.5 )
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
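
# Hypothetical helper (not part of the tests above) mirroring the stopping-length
# precedence that the warning test checks: `max_new_tokens` wins over `max_length`.
def _effective_max_length(input_len, max_length=None, max_new_tokens=None):
    if max_new_tokens is not None:
        return input_len + max_new_tokens
    return max_length if max_length is not None else 20  # 20 = assumed library default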
| 644
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Any = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
    def test_embeddings(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 644
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__A : Dict = None
__A : List[str] = logging.get_logger(__name__)
__A : str = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A : List[Any] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__A : Union[str, Any] = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
__A : Tuple = '''▁'''
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
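    # Example (illustrative ids): create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 1, 1]
    # i.e. len([CLS] + A + [SEP]) zeros followed by len(B + [SEP]) ones.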
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 704
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 141
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : List[str] = """▁"""
__lowerCamelCase : str = {"""vocab_file""": """spiece.model"""}
__lowerCamelCase : Dict = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
__lowerCamelCase : Any = {
"""google/reformer-crime-and-punishment""": 52_4288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer based on SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
def _lowercase ( self : List[str] , __A : str ):
return self.sp_model.encode(__A , out_type=__A )
def _lowercase ( self : List[str] , __A : List[str] ):
return self.sp_model.piece_to_id(__A )
def _lowercase ( self : Optional[int] , __A : Any ):
if index < self.sp_model.get_piece_size():
snake_case__ : Tuple = self.sp_model.IdToPiece(__A )
return token
def _lowercase ( self : Optional[int] , __A : Union[str, Any] ):
snake_case__ : Any = []
snake_case__ : str = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__A ) + token
snake_case__ : int = []
else:
current_sub_tokens.append(__A )
out_string += self.sp_model.decode(__A )
return out_string.strip()
def _lowercase ( self : Any , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Union[str, Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , "wb" ) as fi:
snake_case__ : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
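# A quick round-trip sketch of the tokenizer above (checkpoint name taken from the
# pretrained map; requires the sentencepiece package to be installed):
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
tokens = tokenizer.tokenize("Crime and Punishment")  # SentencePiece pieces, prefixed with "▁"
ids = tokenizer.convert_tokens_to_ids(tokens)
# convert_tokens_to_string (defined above) stitches the pieces back together
print(tokenizer.convert_tokens_to_string(tokens))  # expected: "Crime and Punishment"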
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
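# A sketch of what the two helpers above produce for a sentence pair
# (checkpoint name from the pretrained map above):
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
ids_a = tokenizer.encode("first sentence", add_special_tokens=False)
ids_b = tokenizer.encode("second sentence", add_special_tokens=False)

input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)  # [CLS] A [SEP] B [SEP]
token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)  # 0s then 1s
assert len(input_ids) == len(token_type_ids)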
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by deleting exactly one digit.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
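# Worked example: for 9302 the one-digit deletions are 302, 902, 932 and 930,
# so the maximum is 932.
assert remove_digit(9302) == 932
assert remove_digit(152) == 52  # candidates: 52, 12, 15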
"""Conway's Game of Life on an n x n canvas, rendered with matplotlib."""
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Apply the rules of the game to every point and return the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # clamp the window so negative indices do not wrap around the canvas
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[max(r - 1, 0) : r + 2, max(c - 1, 0) : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
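# __judge_point above implements the standard B3/S23 rules. A minimal,
# matplotlib-free sketch of one generation (independent of the canvas helpers
# above) shows the classic blinker flipping from a horizontal to a vertical bar:
def step(grid: list[list[bool]]) -> list[list[bool]]:
    rows, cols = len(grid), len(grid[0])
    new_grid = [[False] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            alive = sum(
                grid[r + dr][c + dc]
                for dr in (-1, 0, 1)
                for dc in (-1, 0, 1)
                if (dr or dc) and 0 <= r + dr < rows and 0 <= c + dc < cols
            )
            # birth on exactly three neighbours, survival on two or three
            new_grid[r][c] = alive == 3 or (grid[r][c] and alive == 2)
    return new_grid


blinker = [
    [False, False, False],
    [True, True, True],
    [False, False, False],
]
assert step(blinker) == [
    [False, True, False],
    [False, True, False],
    [False, True, False],
]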
"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""

import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
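# After conversion, the checkpoint is driven through the standard generate API.
# A sketch following the documented DocVQA usage (the image path is a placeholder):
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")

image = Image.open("document.png").convert("RGB")  # hypothetical input document
task_prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values

outputs = model.generate(
    pixel_values,
    decoder_input_ids=decoder_input_ids,
    max_length=model.decoder.config.max_position_embeddings,
    pad_token_id=processor.tokenizer.pad_token_id,
    eos_token_id=processor.tokenizer.eos_token_id,
    use_cache=True,
)
print(processor.token2json(processor.batch_decode(outputs)[0]))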
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of the number 2**power.

    >>> solution(1000)
    1366
    >>> solution(15)
    26
    """
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
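# The loop above is equivalent to a generator one-liner; the expected answer for
# the default power of 1000 is 1366 (Project Euler problem 16).
assert solution(15) == 26  # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26
assert solution(1000) == sum(int(digit) for digit in str(2**1000))  # 1366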
def solution() -> int:
    """Return the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000).

    >>> solution()
    171
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 January 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
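# The hand-rolled calendar above can be cross-checked against the standard
# library: date.weekday() returns 6 for Sunday, and both approaches give 171.
from datetime import date

sundays = sum(
    date(year, month, 1).weekday() == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
assert sundays == 171 == solution()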
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
import math

"""
In cryptography, the transposition cipher is a method of encryption where the
positions held by units of plaintext are shifted according to a regular system,
so that the ciphertext constitutes a permutation of the plaintext.
"""


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """
    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """
    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
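# The same columnar scheme restated compactly with slicing; the helper names here
# are illustrative, not part of the module above. The pair round-trips as expected.
import math


def transpose_encrypt(key: int, message: str) -> str:
    # column `col` collects message[col], message[col + key], ...
    return "".join(message[col::key] for col in range(key))


def transpose_decrypt(key: int, ciphertext: str) -> str:
    num_cols = math.ceil(len(ciphertext) / key)
    num_shaded_boxes = num_cols * key - len(ciphertext)
    plain_text = [""] * num_cols
    col = row = 0
    for symbol in ciphertext:
        plain_text[col] += symbol
        col += 1
        # wrap at the row end, one column early once we reach the short rows
        if col == num_cols or (col == num_cols - 1 and row >= key - num_shaded_boxes):
            col = 0
            row += 1
    return "".join(plain_text)


message = "Common sense is not so common."
assert transpose_decrypt(8, transpose_encrypt(8, message)) == message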
import math


def is_prime(number: int) -> bool:
    """Check whether a number is prime in O(sqrt(n)) time.

    >>> is_prime(13)
    True
    >>> is_prime(15)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number.

    >>> solution(6)
    13
    >>> solution(20)
    71
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
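# Cross-check: trial division against previously found primes, cut off at
# sqrt(candidate) via takewhile; the answer for the default nth of 10001 is 104743.
from itertools import count, takewhile


def nth_prime(nth: int) -> int:
    primes: list[int] = []
    for candidate in count(2):
        if all(candidate % p for p in takewhile(lambda p: p * p <= candidate, primes)):
            primes.append(candidate)
            if len(primes) == nth:
                return candidate


assert nth_prime(6) == solution(6) == 13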
# Fine-tune a GPT-2 language model with Information Gain Filtration (IGF): a
# secondary learner predicts the information gain of each candidate context and
# only informative contexts are backpropagated through the model.

import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs used to train the secondary learner."""
    set_seed(3)

    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )

    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner on the collected (context, IG(X)) pairs."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune the language model, backpropagating only on contexts the secondary learner deems informative."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
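# The script leans on compute_perplexity from the accompanying igf package. A
# minimal stand-in sketch of the quantity itself with plain transformers (this is
# an illustration, not the package's implementation):
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

model = GPT2LMHeadModel.from_pretrained("gpt2").eval()
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

ids = tokenizer("Information gain filtration selects informative batches.", return_tensors="pt").input_ids
with torch.no_grad():
    loss = model(ids, labels=ids).loss  # mean token-level cross-entropy
print(torch.exp(loss).item())  # perplexity = exp(mean negative log-likelihood)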
"""Functions for downloading and reading MNIST data (deprecated)."""

import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for a set of images and labels (deprecated)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
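# Hypothetical usage of the loader above (the cache directory is an assumption;
# next_batch is the method defined on _DataSet):
datasets = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
print(datasets.train.num_examples)  # 55000 after carving out the validation split
images, labels = datasets.train.next_batch(32)
print(images.shape, labels.shape)  # (32, 784) and (32, 10)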