code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
def __init__( self , a , a = True , a = None , a = 32 , a = True , a = 1 / 255 , a = True , a = True , a = [0.48_145_466, 0.4_578_275, 0.40_821_073] , a = [0.26_862_954, 0.26_130_258, 0.27_577_711] , a = True , a=7 , a=30 , a=400 , a=3 , ):
lowercase__ : List[str] = parent
lowercase__ : str = do_resize
lowercase__ : str = size if size is not None else {'shortest_edge': 288}
lowercase__ : Tuple = size_divisor
lowercase__ : List[str] = do_rescale
lowercase__ : Optional[int] = rescale_factor
lowercase__ : List[Any] = do_normalize
lowercase__ : List[str] = do_center_crop
lowercase__ : str = image_mean
lowercase__ : List[Any] = image_std
lowercase__ : str = do_pad
lowercase__ : Tuple = batch_size
lowercase__ : List[str] = num_channels
lowercase__ : Any = min_resolution
lowercase__ : Optional[Any] = max_resolution
def snake_case_ ( self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def snake_case_ ( self , a , a=False):
if not batched:
lowercase__ : Optional[Any] = self.size['shortest_edge']
lowercase__ : List[str] = image_inputs[0]
if isinstance(snake_case_ , Image.Image):
lowercase__ , lowercase__ : Optional[int] = image.size
else:
lowercase__ , lowercase__ : Optional[int] = image.shape[1], image.shape[2]
lowercase__ : List[Any] = size / min(snake_case_ , snake_case_)
if h < w:
lowercase__ , lowercase__ : Union[str, Any] = size, scale * w
else:
lowercase__ , lowercase__ : Optional[Any] = scale * h, size
lowercase__ : Dict = int((1333 / 800) * size)
if max(snake_case_ , snake_case_) > max_size:
lowercase__ : str = max_size / max(snake_case_ , snake_case_)
lowercase__ : Any = newh * scale
lowercase__ : Tuple = neww * scale
lowercase__ , lowercase__ : Union[str, Any] = int(newh + 0.5), int(neww + 0.5)
lowercase__ , lowercase__ : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
lowercase__ : Optional[Any] = []
for image in image_inputs:
lowercase__ , lowercase__ : str = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
lowercase__ : Union[str, Any] = max(snake_case_ , key=lambda a: item[0])[0]
lowercase__ : Optional[Any] = max(snake_case_ , key=lambda a: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ (lowerCAmelCase_ , unittest.TestCase ):
__lowerCamelCase : str = BridgeTowerImageProcessor if is_vision_available() else None
def snake_case_ ( self):
lowercase__ : Optional[int] = BridgeTowerImageProcessingTester(self)
@property
def snake_case_ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self):
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(snake_case_ , 'image_mean'))
self.assertTrue(hasattr(snake_case_ , 'image_std'))
self.assertTrue(hasattr(snake_case_ , 'do_normalize'))
self.assertTrue(hasattr(snake_case_ , 'do_resize'))
self.assertTrue(hasattr(snake_case_ , 'size'))
self.assertTrue(hasattr(snake_case_ , 'size_divisor'))
def snake_case_ ( self):
pass
def snake_case_ ( self):
# Initialize image processor
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_)
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image)
# Test not batched input
lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(snake_case_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Tuple = image_processing(snake_case_ , return_tensors='pt').pixel_values
lowercase__ , lowercase__ : str = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self):
# Initialize image processor
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_)
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray)
# Test not batched input
lowercase__ : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
lowercase__ , lowercase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(snake_case_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Any = image_processing(snake_case_ , return_tensors='pt').pixel_values
lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self):
# Initialize image processor
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_)
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor)
# Test not batched input
lowercase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
lowercase__ , lowercase__ : List[str] = self.image_processor_tester.get_expected_values(snake_case_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : List[str] = image_processing(snake_case_ , return_tensors='pt').pixel_values
lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 164
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__SCREAMING_SNAKE_CASE :Tuple = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE :Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__SCREAMING_SNAKE_CASE :Dict = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
__SCREAMING_SNAKE_CASE :Any = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__SCREAMING_SNAKE_CASE :Dict = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__SCREAMING_SNAKE_CASE :Any = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def UpperCAmelCase_ ( __lowercase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , __lowercase )
return [m.group(0 ) for m in matches]
def UpperCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCAmelCase = {
config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_UpperCAmelCase = collections.defaultdict(__lowercase )
_UpperCAmelCase = collections.defaultdict(__lowercase )
_UpperCAmelCase = collections.defaultdict(__lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__lowercase ):
_UpperCAmelCase = None
if _re_tf_models.match(__lowercase ) is not None:
_UpperCAmelCase = tf_models
_UpperCAmelCase = _re_tf_models.match(__lowercase ).groups()[0]
elif _re_flax_models.match(__lowercase ) is not None:
_UpperCAmelCase = flax_models
_UpperCAmelCase = _re_flax_models.match(__lowercase ).groups()[0]
elif _re_pt_models.match(__lowercase ) is not None:
_UpperCAmelCase = pt_models
_UpperCAmelCase = _re_pt_models.match(__lowercase ).groups()[0]
if lookup_dict is not None:
while len(__lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
_UpperCAmelCase = True
break
# Try again after removing the last word in the name
_UpperCAmelCase = "".join(camel_case_split(__lowercase )[:-1] )
_UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_UpperCAmelCase = list(__lowercase )
all_models.sort()
_UpperCAmelCase = {"model_type": all_models}
_UpperCAmelCase = [pt_models[t] for t in all_models]
_UpperCAmelCase = [tf_models[t] for t in all_models]
_UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_UpperCAmelCase = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_UpperCAmelCase = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_UpperCAmelCase = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_UpperCAmelCase = "AutoTokenizer"
_UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(__lowercase )
def UpperCAmelCase_ ( __lowercase : List[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_UpperCAmelCase = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
_UpperCAmelCase = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(__lowercase , __lowercase , __lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(__lowercase , __lowercase ):
continue
# First extract all model_names
_UpperCAmelCase = []
for name in getattr(__lowercase , __lowercase ).values():
if isinstance(__lowercase , __lowercase ):
model_names.append(__lowercase )
else:
model_names.extend(list(__lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = get_frameworks_table()
_UpperCAmelCase = Dataset.from_pandas(__lowercase )
_UpperCAmelCase = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=__lowercase )
_UpperCAmelCase = Dataset.from_json(__lowercase )
_UpperCAmelCase = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(__lowercase ) )
}
_UpperCAmelCase = update_pipeline_and_auto_class_table(__lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_UpperCAmelCase = sorted(table.keys() )
_UpperCAmelCase = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
_UpperCAmelCase = Dataset.from_pandas(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__lowercase , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(__lowercase , "pipeline_tags.json" ) )
if commit_sha is not None:
_UpperCAmelCase = (
f'Update with commit {commit_sha}\n\nSee: '
f'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
_UpperCAmelCase = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=__lowercase , repo_type="dataset" , token=__lowercase , commit_message=__lowercase , )
def UpperCAmelCase_ ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
_UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
_UpperCAmelCase = pipeline_tasks[key]["pt"]
if isinstance(__lowercase , (list, tuple) ):
_UpperCAmelCase = model[0]
_UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(__lowercase )
if len(__lowercase ) > 0:
_UpperCAmelCase = ", ".join(__lowercase )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
f'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
__SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 236
| 0
|
import numpy as np
class _a :
'''simple docstring'''
def __init__( self ):
__A : Optional[int] = (0, 0)
__A : Union[str, Any] = None
__A : Tuple = 0
__A : List[str] = 0
__A : Dict = 0
def __eq__( self , __UpperCAmelCase ):
return self.position == cell.position
def __UpperCAmelCase( self ):
print(self.position )
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase=(5, 5) ):
__A : Tuple = np.zeros(__UpperCAmelCase )
__A : Tuple = world_size[0]
__A : List[str] = world_size[1]
def __UpperCAmelCase( self ):
print(self.w )
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : Tuple = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__A : str = cell.position[0]
__A : Tuple = cell.position[1]
__A : Optional[Any] = []
for n in neughbour_cord:
__A : Optional[int] = current_x + n[0]
__A : Optional[int] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__A : Optional[Any] = Cell()
__A : int = (x, y)
__A : Any = cell
neighbours.append(__UpperCAmelCase )
return neighbours
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
__A : Optional[int] = []
__A : Dict = []
_open.append(_lowercase )
while _open:
__A : List[str] = np.argmin([n.f for n in _open] )
__A : List[str] = _open[min_f]
_closed.append(_open.pop(_lowercase ) )
if current == goal:
break
for n in world.get_neigbours(_lowercase ):
for c in _closed:
if c == n:
continue
__A : List[str] = current.g + 1
__A , __A : Dict = n.position
__A , __A : Dict = goal.position
__A : Optional[Any] = (ya - ya) ** 2 + (xa - xa) ** 2
__A : Optional[Any] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_lowercase )
__A : Dict = []
while current.parent is not None:
path.append(current.position )
__A : List[Any] = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
UpperCamelCase = Gridworld()
# Start position and goal
UpperCamelCase = Cell()
UpperCamelCase = (0, 0)
UpperCamelCase = Cell()
UpperCamelCase = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
UpperCamelCase = astar(world, start, goal)
# Just for visual reasons.
for i in s:
UpperCamelCase = 1
print(world.w)
| 387
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['MobileNetV2FeatureExtractor']
UpperCamelCase = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 387
| 1
|
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class UpperCamelCase (__snake_case , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = XLMProphetNetTokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Optional[int] = True
def __snake_case ( self :Optional[int] ) ->List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : List[str] = XLMProphetNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self :int ) ->Any:
lowercase : Dict = """[PAD]"""
lowercase : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __snake_case ( self :Any ) ->Union[str, Any]:
lowercase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__magic_name__ ) , 1_012 )
def __snake_case ( self :int ) ->int:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def __snake_case ( self :Optional[Any] ) ->List[Any]:
lowercase : Optional[Any] = XLMProphetNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def __snake_case ( self :List[Any] ) ->str:
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def __snake_case ( self :int ) ->Optional[Any]:
lowercase : str = """Hello World!"""
lowercase : Optional[Any] = [35_389, 6_672, 49, 2]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def __snake_case ( self :Optional[int] ) ->Union[str, Any]:
# fmt: off
lowercase : Optional[int] = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 264
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
_lowerCAmelCase = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def UpperCamelCase ( _A ) -> List[str]:
lowercase : List[str] = EfficientNetConfig()
lowercase : Any = CONFIG_MAP[model_name]["""hidden_dim"""]
lowercase : List[str] = CONFIG_MAP[model_name]["""width_coef"""]
lowercase : str = CONFIG_MAP[model_name]["""depth_coef"""]
lowercase : int = CONFIG_MAP[model_name]["""image_size"""]
lowercase : List[Any] = CONFIG_MAP[model_name]["""dropout_rate"""]
lowercase : int = CONFIG_MAP[model_name]["""dw_padding"""]
lowercase : Optional[int] = """huggingface/label-files"""
lowercase : int = """imagenet-1k-id2label.json"""
lowercase : Any = 1_000
lowercase : Any = json.load(open(hf_hub_download(_A , _A , repo_type="""dataset""" ) , """r""" ) )
lowercase : Optional[int] = {int(_A ): v for k, v in idalabel.items()}
lowercase : int = idalabel
lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( ) -> Tuple:
lowercase : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase : Optional[int] = Image.open(requests.get(_A , stream=_A ).raw )
return im
def UpperCamelCase ( _A ) -> Optional[Any]:
lowercase : str = CONFIG_MAP[model_name]["""image_size"""]
lowercase : Optional[int] = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=_A , )
return preprocessor
def UpperCamelCase ( _A ) -> Optional[int]:
lowercase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
lowercase : Optional[Any] = sorted(set(_A ) )
lowercase : Dict = len(_A )
lowercase : List[str] = {b: str(_A ) for b, i in zip(_A , range(_A ) )}
lowercase : Union[str, Any] = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
lowercase : str = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
lowercase : Union[str, Any] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase : Optional[int] = """efficientnet.""" + item[1]
lowercase : Any = """classifier.weight"""
lowercase : Tuple = """classifier.bias"""
return key_mapping
def UpperCamelCase ( _A , _A , _A ) -> Optional[Any]:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase : List[Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase : str = torch.from_numpy(_A ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase : Optional[int] = torch.from_numpy(_A ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase : List[Any] = torch.from_numpy(np.transpose(_A ) )
else:
lowercase : Optional[int] = torch.from_numpy(_A )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_A )
@torch.no_grad()
def UpperCamelCase ( _A , _A , _A , _A ) -> str:
lowercase : Any = model_classes[model_name](
include_top=_A , weights="""imagenet""" , input_tensor=_A , input_shape=_A , pooling=_A , classes=1_000 , classifier_activation="""softmax""" , )
lowercase : Dict = original_model.trainable_variables
lowercase : Any = original_model.non_trainable_variables
lowercase : Any = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase : Dict = param.numpy()
lowercase : List[str] = list(tf_params.keys() )
# Load HuggingFace model
lowercase : str = get_efficientnet_config(_A )
lowercase : List[Any] = EfficientNetForImageClassification(_A ).eval()
lowercase : Optional[int] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
lowercase : int = rename_keys(_A )
replace_params(_A , _A , _A )
# Initialize preprocessor and preprocess input image
lowercase : Optional[int] = convert_image_processor(_A )
lowercase : Any = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase : Union[str, Any] = hf_model(**_A )
lowercase : List[Any] = outputs.logits.detach().numpy()
# Original model inference
lowercase : Optional[Any] = False
lowercase : str = CONFIG_MAP[model_name]["""image_size"""]
lowercase : Optional[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase : Optional[Any] = image.img_to_array(_A )
lowercase : Dict = np.expand_dims(_A , axis=0 )
lowercase : List[str] = original_model.predict(_A )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_A , _A , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(_A ):
os.mkdir(_A )
# Save converted model and image processor
hf_model.save_pretrained(_A )
preprocessor.save_pretrained(_A )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase : Dict = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_A )
hf_model.push_to_hub(_A )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
_lowerCAmelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 264
| 1
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_snake_case : int = logging.getLogger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
# save results
if os.path.exists(__lowerCamelCase ):
if os.path.exists(os.path.join(__lowerCamelCase , "config.json" ) ) and os.path.isfile(
os.path.join(__lowerCamelCase , "config.json" ) ):
os.remove(os.path.join(__lowerCamelCase , "config.json" ) )
if os.path.exists(os.path.join(__lowerCamelCase , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(__lowerCamelCase , "pytorch_model.bin" ) ):
os.remove(os.path.join(__lowerCamelCase , "pytorch_model.bin" ) )
else:
os.makedirs(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=False ):
__snake_case : List[Any] = 2
if unlogit:
__snake_case : Union[str, Any] = torch.pow(__lowerCamelCase , __lowerCamelCase )
__snake_case : str = p * torch.log(__lowerCamelCase )
__snake_case : List[Any] = 0
return -plogp.sum(dim=-1 )
def lowerCAmelCase_ ( __lowerCamelCase ):
logger.info("lv, h >\t" + "\t".join(F'{x + 1}' for x in range(len(__lowerCamelCase ) ) ) )
for row in range(len(__lowerCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'layer {row + 1}:\t' + "\t".join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(F'layer {row + 1}:\t' + "\t".join(F'{x:d}' for x in tensor[row].cpu().data ) )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=False ):
__snake_case : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
__snake_case : Optional[Any] = torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
__snake_case : Union[str, Any] = torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
if head_mask is None:
__snake_case : List[str] = torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCamelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__snake_case : Optional[int] = None
__snake_case : Optional[Any] = 0.0
__snake_case : Tuple = 0.0
for step, inputs in enumerate(tqdm(__lowerCamelCase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
__snake_case : int = tuple(t.to(args.device ) for t in inputs )
(__snake_case ) : Optional[int] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__snake_case : int = model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__snake_case : List[str] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCamelCase ):
__snake_case : str = entropy(attn.detach() , __lowerCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__snake_case : List[Any] = 2
__snake_case : Tuple = torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__snake_case : Tuple = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(__lowerCamelCase )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(__lowerCamelCase )
logger.info("Head ranked by importance scores" )
__snake_case : List[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__snake_case : Tuple = torch.arange(
head_importance.numel() , device=args.device )
__snake_case : Optional[int] = head_ranks.view_as(__lowerCamelCase )
print_ad_tensor(__lowerCamelCase )
return attn_entropy, head_importance, total_loss
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[Any] = compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase )
__snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , __lowerCamelCase , original_score * args.masking_threshold )
__snake_case : List[Any] = torch.ones_like(__lowerCamelCase )
__snake_case : List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__snake_case : Union[str, Any] = original_score
while current_score >= original_score * args.masking_threshold:
__snake_case : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__snake_case : List[Any] = float("Inf" )
__snake_case : int = head_importance.view(-1 ).sort()[1]
if len(__lowerCamelCase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
__snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
__snake_case : Optional[Any] = new_head_mask.view(-1 )
__snake_case : str = 0.0
__snake_case : List[str] = new_head_mask.view_as(__lowerCamelCase )
__snake_case : Any = new_head_mask.clone().detach()
print_ad_tensor(__lowerCamelCase )
# Compute metric and head importance again
__snake_case : Union[str, Any] = compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase )
__snake_case : List[str] = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info("Final head mask" )
print_ad_tensor(__lowerCamelCase )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : int = datetime.now()
__snake_case : Any = compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase )
__snake_case : Dict = 1 / loss
__snake_case : Optional[Any] = datetime.now() - before_time
__snake_case : int = sum(p.numel() for p in model.parameters() )
__snake_case : Any = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__snake_case : Dict = [
v,
]
assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCamelCase )
__snake_case : Dict = sum(p.numel() for p in model.parameters() )
__snake_case : int = datetime.now()
__snake_case : Optional[Any] = compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , )
__snake_case : Optional[Any] = 1 / loss
__snake_case : Union[str, Any] = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 1_0_0 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowerCamelCase , __lowerCamelCase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 1_0_0 )
save_model(__lowerCamelCase , args.output_dir )
def lowerCAmelCase_ ( ):
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=__lowerCamelCase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=__lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=__lowerCamelCase , type=__lowerCamelCase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=__lowerCamelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=__lowerCamelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=__lowerCamelCase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=__lowerCamelCase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=1_2_8 , type=__lowerCamelCase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=__lowerCamelCase , help="Batch size." )
parser.add_argument("--seed" , type=__lowerCamelCase , default=4_2 )
parser.add_argument("--local_rank" , type=__lowerCamelCase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=__lowerCamelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=__lowerCamelCase , default="" , help="Can be used for distant debugging." )
__snake_case : Union[str, Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__snake_case : str = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
__snake_case : Union[str, Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__snake_case : Tuple = torch.device("cuda" , args.local_rank )
__snake_case : int = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__snake_case : Tuple = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__snake_case : int = nn.parallel.DistributedDataParallel(
__lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase )
elif args.n_gpu > 1:
__snake_case : str = nn.DataParallel(__lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , __lowerCamelCase )
# Prepare dataset
__snake_case : Optional[Any] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__snake_case : Any = (torch.from_numpy(__lowerCamelCase ),)
__snake_case : str = TensorDataset(*__lowerCamelCase )
__snake_case : Optional[Any] = RandomSampler(__lowerCamelCase )
__snake_case : Optional[int] = DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__snake_case : List[Any] = mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 721
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_snake_case : List[Any] = "src/diffusers"
_snake_case : str = "."
# This is to make sure the diffusers module imported is the one in the repo.
_snake_case : List[str] = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
_snake_case : Dict = spec.loader.load_module()
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , __lowerCamelCase ) is not None
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : str = object_name.split("." )
__snake_case : Any = 0
# First let's find the module where our object lives.
__snake_case : Optional[int] = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase , F'{module}.py' ) ):
i += 1
if i < len(__lowerCamelCase ):
__snake_case : Dict = os.path.join(__lowerCamelCase , parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(__lowerCamelCase , F'{module}.py' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
__snake_case : Optional[Any] = f.readlines()
# Now let's find the class / func in the code!
__snake_case : Any = ""
__snake_case : Any = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__snake_case : Optional[Any] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index] , __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__snake_case : Dict = lines[start_index:line_index]
return "".join(__lowerCamelCase )
_snake_case : Any = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_snake_case : Union[str, Any] = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
_snake_case : Optional[int] = re.compile(R"<FILL\s+[^>]*>")
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Dict = code.split("\n" )
__snake_case : List[str] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[str] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
__snake_case : str = F'class Bla:\n{code}'
__snake_case : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=__lowerCamelCase )
__snake_case : int = black.format_str(__lowerCamelCase , mode=__lowerCamelCase )
__snake_case , __snake_case : List[str] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=False ):
with open(__lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
__snake_case : List[Any] = f.readlines()
__snake_case : int = []
__snake_case : Optional[Any] = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
__snake_case : List[Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__snake_case , __snake_case , __snake_case : List[str] = search.groups()
__snake_case : List[str] = find_code_in_diffusers(__lowerCamelCase )
__snake_case : str = get_indent(__lowerCamelCase )
__snake_case : Optional[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
__snake_case : Tuple = theoretical_indent
__snake_case : str = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
__snake_case : str = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
__snake_case : Union[str, Any] = lines[line_index]
__snake_case : Any = _should_continue(__lowerCamelCase , __lowerCamelCase ) and re.search(F'^{indent}# End copy' , __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__snake_case : int = lines[start_index:line_index]
__snake_case : int = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
__snake_case : Union[str, Any] = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
__snake_case : Optional[Any] = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
__snake_case : Optional[int] = replace_pattern.replace("with" , "" ).split("," )
__snake_case : Tuple = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__snake_case , __snake_case , __snake_case : Optional[Any] = pattern.groups()
__snake_case : Tuple = re.sub(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if option.strip() == "all-casing":
__snake_case : List[Any] = re.sub(obja.lower() , obja.lower() , __lowerCamelCase )
__snake_case : Union[str, Any] = re.sub(obja.upper() , obja.upper() , __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__snake_case : str = blackify(lines[start_index - 1] + theoretical_code )
__snake_case : Tuple = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__snake_case : str = lines[:start_index] + [theoretical_code] + lines[line_index:]
__snake_case : List[Any] = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(__lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCAmelCase_ ( __lowerCamelCase = False ):
__snake_case : Optional[Any] = glob.glob(os.path.join(__lowerCamelCase , "**/*.py" ) , recursive=__lowerCamelCase )
__snake_case : Dict = []
for filename in all_files:
__snake_case : Union[str, Any] = is_copy_consistent(__lowerCamelCase , __lowerCamelCase )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
__snake_case : Optional[Any] = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_snake_case : Union[str, Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 203
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Any = Path(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = Path(_SCREAMING_SNAKE_CASE )
dest_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
for path in src_dir.iterdir():
lowerCAmelCase__ :Tuple = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowerCAmelCase__ :Optional[Any] = dest_dir.joinpath(path.name )
print(_SCREAMING_SNAKE_CASE )
dest_path.open('w' ).write('\n'.join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
fire.Fire(minify)
| 93
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] =MODEL_FOR_CAUSAL_LM_MAPPING
lowerCamelCase : Dict =TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
__lowerCAmelCase : Optional[Any] = text_generator("""This is a test""" , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
__lowerCAmelCase : List[str] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
__lowerCAmelCase : List[Any] = text_generator("""This is a test""" , do_sample=lowerCAmelCase , num_return_sequences=2 , return_tensors=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{"""generated_token_ids""": ANY(lowerCAmelCase )},
{"""generated_token_ids""": ANY(lowerCAmelCase )},
] , )
__lowerCAmelCase : List[Any] = text_generator.model.config.eos_token_id
__lowerCAmelCase : str = """<pad>"""
__lowerCAmelCase : Any = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase , )
self.assertEqual(
lowerCAmelCase , [
[
{"""generated_token_ids""": ANY(lowerCAmelCase )},
{"""generated_token_ids""": ANY(lowerCAmelCase )},
],
[
{"""generated_token_ids""": ANY(lowerCAmelCase )},
{"""generated_token_ids""": ANY(lowerCAmelCase )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Dict = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
__lowerCAmelCase : str = text_generator("""This is a test""" , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
__lowerCAmelCase : str = text_generator(["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Tuple = TextGenerationPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = """Hello I believe in"""
__lowerCAmelCase : Optional[int] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
__lowerCAmelCase : Optional[int] = text_generator(lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
__lowerCAmelCase : Optional[Any] = text_generator(lowerCAmelCase , stop_sequence=""" fe""" )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe"""}] )
def SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase : int , lowerCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = text_generator.model
__lowerCAmelCase : Optional[int] = text_generator.tokenizer
__lowerCAmelCase : int = text_generator("""This is a test""" )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": ANY(lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__lowerCAmelCase : Dict = text_generator("""This is a test""" , return_full_text=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": ANY(lowerCAmelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__lowerCAmelCase : Tuple = pipeline(task="""text-generation""" , model=lowerCAmelCase , tokenizer=lowerCAmelCase , return_full_text=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = text_generator("""This is a test""" )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": ANY(lowerCAmelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__lowerCAmelCase : Tuple = text_generator("""This is a test""" , return_full_text=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": ANY(lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__lowerCAmelCase : Any = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__lowerCAmelCase : Dict = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
] , )
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[Any] = text_generator("""test""" , return_full_text=lowerCAmelCase , return_text=lowerCAmelCase )
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[Any] = text_generator("""test""" , return_full_text=lowerCAmelCase , return_tensors=lowerCAmelCase )
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[str] = text_generator("""test""" , return_text=lowerCAmelCase , return_tensors=lowerCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__lowerCAmelCase : Union[str, Any] = text_generator("""""" )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": ANY(lowerCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__lowerCAmelCase : Union[str, Any] = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__lowerCAmelCase : int = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_00 , max_new_tokens=20 )
__lowerCAmelCase : Dict = text_generator("""This is a test""" * 5_00 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowerCAmelCase ):
text_generator(
"""This is a test""" * 5_00 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
import torch
# Classic `model_kwargs`
__lowerCAmelCase : Union[str, Any] = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__lowerCAmelCase : Tuple = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__lowerCAmelCase : Dict = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__lowerCAmelCase : int = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__lowerCAmelCase : Dict = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__lowerCAmelCase : str = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
import torch
__lowerCAmelCase : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
import torch
__lowerCAmelCase : Optional[int] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=lowerCAmelCase , top_p=0.5 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = """Hello world"""
__lowerCAmelCase : Dict = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
__lowerCAmelCase : Optional[Any] = logging.get_logger("""transformers.generation.tf_utils""" )
else:
__lowerCAmelCase : Optional[Any] = logging.get_logger("""transformers.generation.utils""" )
__lowerCAmelCase : Dict = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowerCAmelCase ) as cl:
__lowerCAmelCase : Optional[Any] = text_generator(lowerCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(lowerCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(lowerCAmelCase ) as cl:
__lowerCAmelCase : Dict = text_generator(lowerCAmelCase , max_new_tokens=1 )
self.assertNotIn(lowerCAmelCase , cl.out )
with CaptureLogger(lowerCAmelCase ) as cl:
__lowerCAmelCase : str = text_generator(lowerCAmelCase , max_length=10 )
self.assertNotIn(lowerCAmelCase , cl.out )
| 651
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __a ( unittest.TestCase ):
def UpperCamelCase ( self : int)-> List[str]:
__lowerCAmelCase ={
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
__lowerCAmelCase ={
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_28,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_42,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(snake_case_) , snake_case_)
def UpperCamelCase ( self : Tuple)-> Union[str, Any]:
__lowerCAmelCase =np.random.randn(3 , 4)
self.assertTrue(np.allclose(transpose(snake_case_) , x.transpose()))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
@require_torch
def UpperCamelCase ( self : Union[str, Any])-> Optional[int]:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =torch.tensor(snake_case_)
self.assertTrue(np.allclose(transpose(snake_case_) , transpose(snake_case_).numpy()))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
__lowerCAmelCase =torch.tensor(snake_case_)
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0)) , transpose(snake_case_ , axes=(1, 2, 0)).numpy()))
@require_tf
def UpperCamelCase ( self : Union[str, Any])-> List[str]:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =tf.constant(snake_case_)
self.assertTrue(np.allclose(transpose(snake_case_) , transpose(snake_case_).numpy()))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
__lowerCAmelCase =tf.constant(snake_case_)
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0)) , transpose(snake_case_ , axes=(1, 2, 0)).numpy()))
@require_flax
def UpperCamelCase ( self : Optional[int])-> Optional[int]:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =jnp.array(snake_case_)
self.assertTrue(np.allclose(transpose(snake_case_) , np.asarray(transpose(snake_case_))))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
__lowerCAmelCase =jnp.array(snake_case_)
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0)) , np.asarray(transpose(snake_case_ , axes=(1, 2, 0)))))
def UpperCamelCase ( self : Union[str, Any])-> Any:
__lowerCAmelCase =np.random.randn(3 , 4)
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3)) , np.reshape(snake_case_ , (4, 3))))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5)) , np.reshape(snake_case_ , (12, 5))))
@require_torch
def UpperCamelCase ( self : List[Any])-> Any:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =torch.tensor(snake_case_)
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3)) , reshape(snake_case_ , (4, 3)).numpy()))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
__lowerCAmelCase =torch.tensor(snake_case_)
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5)) , reshape(snake_case_ , (12, 5)).numpy()))
@require_tf
def UpperCamelCase ( self : Optional[Any])-> Union[str, Any]:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =tf.constant(snake_case_)
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3)) , reshape(snake_case_ , (4, 3)).numpy()))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
__lowerCAmelCase =tf.constant(snake_case_)
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5)) , reshape(snake_case_ , (12, 5)).numpy()))
@require_flax
def UpperCamelCase ( self : Any)-> str:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =jnp.array(snake_case_)
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3)) , np.asarray(reshape(snake_case_ , (4, 3)))))
__lowerCAmelCase =np.random.randn(3 , 4 , 5)
__lowerCAmelCase =jnp.array(snake_case_)
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5)) , np.asarray(reshape(snake_case_ , (12, 5)))))
def UpperCamelCase ( self : Tuple)-> List[str]:
__lowerCAmelCase =np.random.randn(1 , 3 , 4)
self.assertTrue(np.allclose(squeeze(snake_case_) , np.squeeze(snake_case_)))
__lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5)
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2) , np.squeeze(snake_case_ , axis=2)))
@require_torch
def UpperCamelCase ( self : int)-> List[str]:
__lowerCAmelCase =np.random.randn(1 , 3 , 4)
__lowerCAmelCase =torch.tensor(snake_case_)
self.assertTrue(np.allclose(squeeze(snake_case_) , squeeze(snake_case_).numpy()))
__lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5)
__lowerCAmelCase =torch.tensor(snake_case_)
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2) , squeeze(snake_case_ , axis=2).numpy()))
@require_tf
def UpperCamelCase ( self : Optional[int])-> List[str]:
__lowerCAmelCase =np.random.randn(1 , 3 , 4)
__lowerCAmelCase =tf.constant(snake_case_)
self.assertTrue(np.allclose(squeeze(snake_case_) , squeeze(snake_case_).numpy()))
__lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5)
__lowerCAmelCase =tf.constant(snake_case_)
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2) , squeeze(snake_case_ , axis=2).numpy()))
@require_flax
def UpperCamelCase ( self : Optional[int])-> Optional[int]:
__lowerCAmelCase =np.random.randn(1 , 3 , 4)
__lowerCAmelCase =jnp.array(snake_case_)
self.assertTrue(np.allclose(squeeze(snake_case_) , np.asarray(squeeze(snake_case_))))
__lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5)
__lowerCAmelCase =jnp.array(snake_case_)
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2) , np.asarray(squeeze(snake_case_ , axis=2))))
def UpperCamelCase ( self : List[Any])-> Any:
__lowerCAmelCase =np.random.randn(3 , 4)
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1) , np.expand_dims(snake_case_ , axis=1)))
@require_torch
def UpperCamelCase ( self : Optional[int])-> Optional[int]:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =torch.tensor(snake_case_)
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1) , expand_dims(snake_case_ , axis=1).numpy()))
@require_tf
def UpperCamelCase ( self : List[Any])-> List[Any]:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =tf.constant(snake_case_)
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1) , expand_dims(snake_case_ , axis=1).numpy()))
@require_flax
def UpperCamelCase ( self : List[str])-> str:
__lowerCAmelCase =np.random.randn(3 , 4)
__lowerCAmelCase =jnp.array(snake_case_)
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1) , np.asarray(expand_dims(snake_case_ , axis=1))))
| 709
|
def __lowerCAmelCase ( __lowerCamelCase : int = 3 , __lowerCamelCase : int = 7 , __lowerCamelCase : int = 1000000 ) -> int:
__lowerCAmelCase =0
__lowerCAmelCase =1
for current_denominator in range(1 , limit + 1 ):
__lowerCAmelCase =current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__lowerCAmelCase =current_numerator
__lowerCAmelCase =current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 456
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : str = logging.get_logger(__name__)
a__ : List[Any] = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 'canine'
def __init__( self , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=1_6_3_8_4 , lowercase=1_6 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase=0Xe000 , lowercase=0Xe001 , lowercase=4 , lowercase=4 , lowercase=8 , lowercase=1_6_3_8_4 , lowercase=1_2_8 , **lowercase , ) -> Tuple:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = type_vocab_size
__UpperCamelCase = layer_norm_eps
# Character config:
__UpperCamelCase = downsampling_rate
__UpperCamelCase = upsampling_kernel_size
__UpperCamelCase = num_hash_functions
__UpperCamelCase = num_hash_buckets
__UpperCamelCase = local_transformer_stride
| 601
|
import operator as op
def a_ ( __magic_name__ ) -> Any:
"""simple docstring"""
snake_case : str = []
snake_case : Any = lambda __magic_name__ , __magic_name__ : int(x / y ) # noqa: E731 integer division operation
snake_case : Optional[Any] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(__magic_name__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__magic_name__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(__magic_name__ ) , sep=''' | ''' )
else:
snake_case : Optional[int] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(__magic_name__ ) , sep=''' | ''' )
snake_case : Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(__magic_name__ ) , sep=''' | ''' )
stack.append(
str(opr[x](int(__magic_name__ ) , int(__magic_name__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(__magic_name__ ) , sep=''' | ''' , )
return int(stack[0] )
if __name__ == "__main__":
_a : Union[str, Any] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 598
| 0
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class __magic_name__ :
'''simple docstring'''
def __init__( self : Tuple , snake_case_ : Any ):
__snake_case = str(id_ )
__snake_case = None
__snake_case = None
__snake_case = []
__snake_case = {} # {vertex:distance}
def __lt__( self : int , snake_case_ : List[Any] ):
return self.key < other.key
def __repr__( self : List[str] ):
return self.id
def lowerCAmelCase ( self : List[str] , snake_case_ : Optional[int] ):
self.neighbors.append(__A )
def lowerCAmelCase ( self : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int ):
__snake_case = weight
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _lowerCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , _lowerCAmelCase )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = []
for u in graph:
__snake_case = math.inf
__snake_case = None
__snake_case = 0
__snake_case = graph[:]
while q:
__snake_case = min(_lowerCAmelCase )
q.remove(_lowerCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__snake_case = u
__snake_case = u.edges[v.id]
for i in range(1 , len(_lowerCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
for u in graph:
__snake_case = math.inf
__snake_case = None
__snake_case = 0
__snake_case = list(_lowerCAmelCase )
hq.heapify(_lowerCAmelCase )
while h:
__snake_case = hq.heappop(_lowerCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__snake_case = u
__snake_case = u.edges[v.id]
hq.heapify(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__snake_case = {"+", "-", "*", "/"}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(SCREAMING_SNAKE_CASE ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 614
| 0
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_A : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a__ ( a__, a__ ):
@register_to_config
def __init__( self , _a , _a = None , _a = None ):
super().__init__()
lowercase : Union[str, Any] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
lowercase : List[Any] = torch.zeros(_lowerCamelCase , _lowerCamelCase )
else:
lowercase : str = None
lowercase : Optional[int] = torch.nn.Parameter(_lowerCamelCase )
class a__ ( a__ ):
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
def __init__( self , _a , _a , _a , _a , _a , _a , ):
super().__init__()
self.register_modules(
vqvae=_lowerCamelCase , transformer=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , scheduler=_lowerCamelCase , learned_classifier_free_sampling_embeddings=_lowerCamelCase , )
def __magic_name__ ( self , _a , _a , _a ):
lowercase : int = len(_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else 1
# get prompt text embeddings
lowercase : Tuple = self.tokenizer(
_lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowercase : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowercase : Any = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowerCamelCase )
# duplicate text embeddings for each generation per prompt
lowercase : str = prompt_embeds.repeat_interleave(_lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowercase : Any = self.learned_classifier_free_sampling_embeddings.embeddings
lowercase : Tuple = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowerCamelCase , 1 , 1 )
else:
lowercase : List[str] = [""""""] * batch_size
lowercase : Optional[Any] = text_input_ids.shape[-1]
lowercase : Optional[Any] = self.tokenizer(
_lowerCamelCase , padding="max_length" , max_length=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors="pt" , )
lowercase : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowercase : Union[str, Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowerCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase : int = negative_prompt_embeds.shape[1]
lowercase : Tuple = negative_prompt_embeds.repeat(1 , _lowerCamelCase , 1 )
lowercase : Union[str, Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase : int = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _a , _a = 100 , _a = 5.0 , _a = 1.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase : List[str] = 1
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase : int = len(_lowerCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_lowerCamelCase )}""" )
lowercase : Optional[Any] = batch_size * num_images_per_prompt
lowercase : List[str] = guidance_scale > 1.0
lowercase : Any = self._encode_prompt(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCamelCase , _lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_lowerCamelCase )}.""" )
# get the initial completely masked latents unless the user supplied it
lowercase : List[Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowercase : Dict = self.transformer.num_vector_embeds - 1
lowercase : List[str] = torch.full(_lowerCamelCase , _lowerCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
lowercase : Optional[int] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowerCamelCase , device=self.device )
lowercase : List[str] = self.scheduler.timesteps.to(self.device )
lowercase : str = latents
for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ):
# expand the sample if we are doing classifier free guidance
lowercase : Optional[int] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowercase : Union[str, Any] = self.transformer(_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , timestep=_lowerCamelCase ).sample
if do_classifier_free_guidance:
lowercase : Union[str, Any] = model_output.chunk(2 )
lowercase : Tuple = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_lowerCamelCase , dim=1 , keepdim=_lowerCamelCase )
lowercase : Union[str, Any] = self.truncate(_lowerCamelCase , _lowerCamelCase )
# remove `log(0)`'s (`-inf`s)
lowercase : Tuple = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
lowercase : Union[str, Any] = self.scheduler.step(_lowerCamelCase , timestep=_lowerCamelCase , sample=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase : Union[str, Any] = self.vqvae.config.vq_embed_dim
lowercase : Union[str, Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowercase : Optional[Any] = self.vqvae.quantize.get_codebook_entry(_lowerCamelCase , shape=_lowerCamelCase )
lowercase : Optional[Any] = self.vqvae.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase ).sample
lowercase : Any = (image / 2 + 0.5).clamp(0 , 1 )
lowercase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
def __magic_name__ ( self , _a , _a ):
lowercase : Tuple = torch.sort(_lowerCamelCase , 1 , descending=_lowerCamelCase )
lowercase : str = torch.exp(_lowerCamelCase )
lowercase : Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowercase : int = torch.full_like(keep_mask[:, 0:1, :] , _lowerCamelCase )
lowercase : List[str] = torch.cat((all_true, keep_mask) , dim=1 )
lowercase : Dict = keep_mask[:, :-1, :]
lowercase : int = keep_mask.gather(1 , indices.argsort(1 ) )
lowercase : List[str] = log_p_x_0.clone()
lowercase : List[str] = -torch.inf # -inf = log(0)
return rv
| 361
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class _snake_case ( a__ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
super().__init__(features=_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase , **_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = Sql(
cache_dir=_lowerCamelCase , features=_lowerCamelCase , sql=_lowerCamelCase , con=_lowerCamelCase , **_lowerCamelCase , )
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : List[Any] = None
self.builder.download_and_prepare(
download_config=_lowerCamelCase , download_mode=_lowerCamelCase , verification_mode=_lowerCamelCase , base_path=_lowerCamelCase , )
# Build dataset for splits
UpperCAmelCase__ : Union[str, Any] = self.builder.as_dataset(
split="""train""" , verification_mode=_lowerCamelCase , in_memory=self.keep_in_memory)
return dataset
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
UpperCAmelCase__ : Optional[Any] = dataset
UpperCAmelCase__ : Optional[int] = name
UpperCAmelCase__ : str = con
UpperCAmelCase__ : Optional[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase__ : Union[str, Any] = num_proc
UpperCAmelCase__ : List[str] = to_sql_kwargs
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.to_sql_kwargs.pop("""sql""" , _lowerCamelCase)
UpperCAmelCase__ : List[Any] = self.to_sql_kwargs.pop("""con""" , _lowerCamelCase)
UpperCAmelCase__ : int = self.to_sql_kwargs.pop("""index""" , _lowerCamelCase)
UpperCAmelCase__ : Any = self._write(index=_lowerCamelCase , **self.to_sql_kwargs)
return written
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = args
UpperCAmelCase__ : Tuple = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
UpperCAmelCase__ : str = query_table(
table=self.dataset.data , key=slice(_lowerCamelCase , offset + self.batch_size) , indices=self.dataset._indices , )
UpperCAmelCase__ : List[str] = batch.to_pandas()
UpperCAmelCase__ : List[str] = df.to_sql(self.name , self.con , index=_lowerCamelCase , **_lowerCamelCase)
return num_rows or len(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , **_lowerCamelCase):
UpperCAmelCase__ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _lowerCamelCase , _lowerCamelCase)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 407
| 0
|
from __future__ import annotations
def _UpperCamelCase ( UpperCamelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
if len(snake_case_ ) == 0:
return array
lowerCAmelCase__ = min(snake_case_ ), max(snake_case_ )
# Compute the variables
lowerCAmelCase__ = _max - _min + 1
lowerCAmelCase__ = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
lowerCAmelCase__ = i - _min
lowerCAmelCase__ = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
lowerCAmelCase__ = 0
for i in range(snake_case_ ):
while holes_repeat[i] > 0:
lowerCAmelCase__ = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : str = input("""Enter numbers separated by comma:\n""")
lowerCamelCase_ : Any = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
| 710
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__snake_case : List[str] = True
from torch.cuda.amp import autocast
__snake_case : Union[str, Any] = logging.getLogger(__name__)
def _UpperCamelCase ( UpperCamelCase_ : Any=None , UpperCamelCase_ : str=None ) -> Optional[Any]:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_SCREAMING_SNAKE_CASE : Optional[bool] = field(
default=__lowercase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''})
_SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''})
_SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''})
_SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={
'''help''': '''The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
_SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout probabilitiy for all 1D convolutional layers in feature extractor.'''} , )
_SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.0_5 , metadata={
'''help''': (
'''Propability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
_SCREAMING_SNAKE_CASE : Optional[float] = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''})
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''train+validation''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''})
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
_SCREAMING_SNAKE_CASE : List[str] = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : WavaVecaProcessor
_SCREAMING_SNAKE_CASE : Union[bool, str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
def __call__( self , _UpperCamelCase ):
"""simple docstring"""
# split inputs and labels since they have to be of different lenghts and need
# different padding methods
lowerCAmelCase__ = [{'input_values': feature['input_values']} for feature in features]
lowerCAmelCase__ = [{'input_ids': feature['labels']} for feature in features]
lowerCAmelCase__ = self.processor.pad(
_UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
lowerCAmelCase__ = self.processor.pad(
labels=_UpperCamelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
lowerCAmelCase__ = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
lowerCAmelCase__ = labels
return batch
class __SCREAMING_SNAKE_CASE ( __lowercase):
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
model.train()
lowerCAmelCase__ = self._prepare_inputs(_UpperCamelCase )
if self.use_amp:
with autocast():
lowerCAmelCase__ = self.compute_loss(_UpperCamelCase , _UpperCamelCase )
else:
lowerCAmelCase__ = self.compute_loss(_UpperCamelCase , _UpperCamelCase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
lowerCAmelCase__ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCAmelCase__ = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
if self.args.gradient_accumulation_steps > 1:
lowerCAmelCase__ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_UpperCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(_UpperCamelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_UpperCamelCase )
else:
loss.backward()
return loss.detach()
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , UpperCamelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
lowerCAmelCase__ = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
lowerCAmelCase__ = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
lowerCAmelCase__ = F"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(UpperCamelCase_ : Any ):
lowerCAmelCase__ = re.sub(UpperCamelCase_ , '' , batch['sentence'] ).lower() + ' '
return batch
lowerCAmelCase__ = train_dataset.map(UpperCamelCase_ , remove_columns=['sentence'] )
lowerCAmelCase__ = eval_dataset.map(UpperCamelCase_ , remove_columns=['sentence'] )
def extract_all_chars(UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase__ = ' '.join(batch['text'] )
lowerCAmelCase__ = list(set(UpperCamelCase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
lowerCAmelCase__ = train_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , batch_size=-1 , keep_in_memory=UpperCamelCase_ , remove_columns=train_dataset.column_names , )
lowerCAmelCase__ = train_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , batch_size=-1 , keep_in_memory=UpperCamelCase_ , remove_columns=eval_dataset.column_names , )
lowerCAmelCase__ = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
lowerCAmelCase__ = {v: k for k, v in enumerate(UpperCamelCase_ )}
lowerCAmelCase__ = vocab_dict[' ']
del vocab_dict[" "]
lowerCAmelCase__ = len(UpperCamelCase_ )
lowerCAmelCase__ = len(UpperCamelCase_ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ )
lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
lowerCAmelCase__ = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
lowerCAmelCase__ = min(len(UpperCamelCase_ ) , data_args.max_train_samples )
lowerCAmelCase__ = train_dataset.select(range(UpperCamelCase_ ) )
if data_args.max_val_samples is not None:
lowerCAmelCase__ = eval_dataset.select(range(data_args.max_val_samples ) )
lowerCAmelCase__ = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(UpperCamelCase_ : List[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = torchaudio.load(batch['path'] )
lowerCAmelCase__ = resampler(UpperCamelCase_ ).squeeze().numpy()
lowerCAmelCase__ = 1_6000
lowerCAmelCase__ = batch['text']
return batch
lowerCAmelCase__ = train_dataset.map(
UpperCamelCase_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
lowerCAmelCase__ = eval_dataset.map(
UpperCamelCase_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(UpperCamelCase_ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
lowerCAmelCase__ = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(UpperCamelCase_ )
return batch
lowerCAmelCase__ = train_dataset.map(
UpperCamelCase_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , )
lowerCAmelCase__ = eval_dataset.map(
UpperCamelCase_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
lowerCAmelCase__ = datasets.load_metric('wer' )
def compute_metrics(UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase__ = pred.predictions
lowerCAmelCase__ = np.argmax(UpperCamelCase_ , axis=-1 )
lowerCAmelCase__ = processor.tokenizer.pad_token_id
lowerCAmelCase__ = processor.batch_decode(UpperCamelCase_ )
# we do not want to group tokens when computing the metrics
lowerCAmelCase__ = processor.batch_decode(pred.label_ids , group_tokens=UpperCamelCase_ )
lowerCAmelCase__ = wer_metric.compute(predictions=UpperCamelCase_ , references=UpperCamelCase_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
lowerCAmelCase__ = DataCollatorCTCWithPadding(processor=UpperCamelCase_ , padding=UpperCamelCase_ )
# Initialize our Trainer
lowerCAmelCase__ = CTCTrainer(
model=UpperCamelCase_ , data_collator=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=UpperCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowerCAmelCase__ = model_args.model_name_or_path
else:
lowerCAmelCase__ = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model()
lowerCAmelCase__ = train_result.metrics
lowerCAmelCase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase_ )
)
lowerCAmelCase__ = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('train' , UpperCamelCase_ )
trainer.save_metrics('train' , UpperCamelCase_ )
trainer.save_state()
# Evaluation
lowerCAmelCase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase__ = trainer.evaluate()
lowerCAmelCase__ = data_args.max_val_samples if data_args.max_val_samples is not None else len(UpperCamelCase_ )
lowerCAmelCase__ = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('eval' , UpperCamelCase_ )
trainer.save_metrics('eval' , UpperCamelCase_ )
return results
if __name__ == "__main__":
main()
| 365
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase : List[Any] = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 511
|
from __future__ import annotations
import os
from typing import Any
import requests
lowerCAmelCase : str = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCAmelCase : Optional[Any] = BASE_URL + '/user'
# https://github.com/settings/tokens
lowerCAmelCase : Optional[int] = os.environ.get('USER_TOKEN', '')
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = {
'Authorization': f"token {auth_token}",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(a , headers=a ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 511
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''mask2former'''
_UpperCamelCase : Tuple = ['''swin''']
_UpperCamelCase : Optional[Any] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : Optional[int] , _A : Optional[Dict] = None , _A : int = 256 , _A : int = 256 , _A : int = 256 , _A : int = 1_024 , _A : str = "relu" , _A : int = 6 , _A : int = 10 , _A : int = 8 , _A : float = 0.0 , _A : int = 2_048 , _A : bool = False , _A : bool = False , _A : int = 4 , _A : int = 255 , _A : int = 100 , _A : float = 0.1 , _A : float = 2.0 , _A : float = 5.0 , _A : float = 5.0 , _A : int = 12_544 , _A : float = 3.0 , _A : float = 0.75 , _A : float = 0.02 , _A : float = 1.0 , _A : bool = True , _A : List[int] = [4, 8, 16, 32] , _A : bool = None , **_A : Tuple , ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
lowercase : str = CONFIG_MAPPING['''swin'''](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_A , _A ):
lowercase : int = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : Dict = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
lowercase : Optional[int] = backbone_config
lowercase : Optional[Any] = feature_size
lowercase : Tuple = mask_feature_size
lowercase : Dict = hidden_dim
lowercase : Union[str, Any] = encoder_feedforward_dim
lowercase : List[Any] = activation_function
lowercase : Optional[int] = encoder_layers
lowercase : Optional[Any] = decoder_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : int = dropout
lowercase : str = dim_feedforward
lowercase : Optional[int] = pre_norm
lowercase : List[str] = enforce_input_projection
lowercase : Any = common_stride
lowercase : Any = ignore_value
lowercase : Optional[int] = num_queries
lowercase : str = no_object_weight
lowercase : Optional[int] = class_weight
lowercase : List[Any] = mask_weight
lowercase : Optional[int] = dice_weight
lowercase : str = train_num_points
lowercase : Optional[Any] = oversample_ratio
lowercase : Optional[int] = importance_sample_ratio
lowercase : List[Any] = init_std
lowercase : Optional[Any] = init_xavier_std
lowercase : Union[str, Any] = use_auxiliary_loss
lowercase : List[str] = feature_strides
lowercase : List[str] = output_auxiliary_logits
lowercase : List[Any] = decoder_layers
super().__init__(**_A )
@classmethod
def __a ( cls : Optional[int] , _A : PretrainedConfig , **_A : List[Any] ) -> str:
"""simple docstring"""
return cls(
backbone_config=_A , **_A , )
def __a ( self : int ) -> Dict[str, any]:
"""simple docstring"""
lowercase : str = copy.deepcopy(self.__dict__ )
lowercase : str = self.backbone_config.to_dict()
lowercase : Any = self.__class__.model_type
return output
| 596
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A :
_UpperCamelCase : Dict = None
@experimental
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
return _map_with_joblib(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Tuple = num_proc if num_proc <= len(__magic_name__ ) else len(__magic_name__ )
lowercase : Tuple = [] # We organize the splits ourselve (contiguous splits)
for index in range(__magic_name__ ):
lowercase : Optional[int] = len(__magic_name__ ) // num_proc
lowercase : List[str] = len(__magic_name__ ) % num_proc
lowercase : Union[str, Any] = div * index + min(__magic_name__ , __magic_name__ )
lowercase : List[str] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__magic_name__ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(__magic_name__ )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(__magic_name__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
lowercase , lowercase : Optional[int] = None, None
if not disable_tqdm:
lowercase , lowercase : Any = (RLock(),), tqdm.set_lock
with Pool(__magic_name__ , initargs=__magic_name__ , initializer=__magic_name__ ) as pool:
lowercase : Tuple = pool.map(__magic_name__ , __magic_name__ )
logger.info(F"""Finished {num_proc} processes""" )
lowercase : Union[str, Any] = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(__magic_name__ )} objects""" )
return mapped
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__magic_name__ ):
return joblib.Parallel()(
joblib.delayed(__magic_name__ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def snake_case( __magic_name__ ) -> List[Any]:
'''simple docstring'''
lowercase : int = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowercase : List[Any] = None
| 596
| 1
|
"""simple docstring"""
def snake_case__ ( _snake_case : float ):
"""simple docstring"""
return 10 - x * x
def snake_case__ ( _snake_case : float , _snake_case : float ):
"""simple docstring"""
if equation(_snake_case ) * equation(_snake_case ) >= 0:
raise ValueError("Wrong space!" )
UpperCamelCase__ = a
while (b - a) >= 0.01:
# Find middle point
UpperCamelCase__ = (a + b) / 2
# Check if middle point is root
if equation(_snake_case ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_snake_case ) * equation(_snake_case ) < 0:
UpperCamelCase__ = c
else:
UpperCamelCase__ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 516
|
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def snake_case__ ( _snake_case : float ):
"""simple docstring"""
if num <= 0:
raise ValueError("math domain error" )
return quad(_snake_case , 0 , _snake_case , args=(_snake_case) )[0]
def snake_case__ ( _snake_case : float , _snake_case : float ):
"""simple docstring"""
return math.pow(_snake_case , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 516
| 1
|
import doctest
from collections import deque
import numpy as np
class snake_case__:
"""simple docstring"""
def __init__( self : str ):
lowercase__ : Tuple = [2, 1, 2, -1]
lowercase__ : List[str] = [1, 2, 3, 4]
def snake_case ( self : str ):
lowercase__ : int = len(self.first_signal )
lowercase__ : int = len(self.second_signal )
lowercase__ : Optional[Any] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# create a zero matrix of max_length x max_length
lowercase__ : Tuple = [[0] * max_length for i in range(SCREAMING_SNAKE_CASE )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = deque(self.second_signal )
rotated_signal.rotate(SCREAMING_SNAKE_CASE )
for j, item in enumerate(SCREAMING_SNAKE_CASE ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowercase__ : List[str] = np.matmul(np.transpose(SCREAMING_SNAKE_CASE ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(SCREAMING_SNAKE_CASE , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 710
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 81
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase_ = Features({"""audio""": Audio()} )
lowercase_ = Features({"""transcription""": Value("""string""" )} )
lowercase_ = "audio"
lowercase_ = "transcription"
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any ):
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , SCREAMING_SNAKE_CASE ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
lowercase__ : int = copy.deepcopy(self )
lowercase__ : List[str] = self.input_schema.copy()
lowercase__ : str = features[self.audio_column]
lowercase__ : int = input_schema
return task_template
@property
def snake_case ( self : Optional[int] ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 496
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 496
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[str] = logging.get_logger(__name__)
def lowercase__( A ):
snake_case__ : Optional[int] = SwinConfig(
embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=['stage2', 'stage3', 'stage4'] , )
snake_case__ : Dict = DetaConfig(
backbone_config=A , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=A , with_box_refine=A , two_stage=A , )
# set labels
snake_case__ : List[Any] = 'huggingface/label-files'
if "o365" in model_name:
snake_case__ : str = 3_6_6
snake_case__ : Tuple = 'object365-id2label.json'
else:
snake_case__ : Optional[int] = 9_1
snake_case__ : List[str] = 'coco-detection-id2label.json'
snake_case__ : str = num_labels
snake_case__ : List[str] = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case__ : Union[str, Any] = {int(A ): v for k, v in idalabel.items()}
snake_case__ : Tuple = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
return config
def lowercase__( A ):
snake_case__ : Optional[int] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def lowercase__( A , A , A ):
snake_case__ : Any = dct.pop(A )
snake_case__ : Tuple = val
def lowercase__( A , A ):
snake_case__ : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
snake_case__ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
snake_case__ : str = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
snake_case__ : List[Any] = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[Any] = in_proj_weight[:dim, :]
snake_case__ : Optional[int] = in_proj_bias[: dim]
snake_case__ : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
snake_case__ : Dict = in_proj_bias[
dim : dim * 2
]
snake_case__ : List[str] = in_proj_weight[
-dim :, :
]
snake_case__ : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase__( A , A ):
# transformer decoder self-attention layers
snake_case__ : List[Any] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
snake_case__ : Dict = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case__ : Any = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Tuple = in_proj_weight[:hidden_size, :]
snake_case__ : Optional[int] = in_proj_bias[:hidden_size]
snake_case__ : Optional[int] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
snake_case__ : Dict = in_proj_bias[hidden_size : hidden_size * 2]
snake_case__ : Optional[int] = in_proj_weight[-hidden_size:, :]
snake_case__ : Dict = in_proj_bias[-hidden_size:]
def lowercase__( ):
snake_case__ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : Tuple = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def lowercase__( A , A , A ):
snake_case__ : Tuple = get_deta_config(A )
# load original state dict
if model_name == "deta-swin-large":
snake_case__ : Any = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
snake_case__ : List[str] = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
snake_case__ : Dict = torch.load(A , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(A , param.shape )
# rename keys
snake_case__ : int = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_swin_q_k_v(A , config.backbone_config )
read_in_decoder_q_k_v(A , A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
snake_case__ : Dict = state_dict.pop(A )
snake_case__ : int = val
if "input_proj" in key:
snake_case__ : int = state_dict.pop(A )
snake_case__ : List[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
snake_case__ : Tuple = state_dict.pop(A )
snake_case__ : List[str] = val
# finally, create HuggingFace model and load state dict
snake_case__ : Union[str, Any] = DetaForObjectDetection(A )
model.load_state_dict(A )
model.eval()
snake_case__ : Optional[int] = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(A )
# load image processor
snake_case__ : Tuple = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
snake_case__ : Optional[int] = prepare_img()
snake_case__ : Dict = processor(images=A , return_tensors='pt' )
snake_case__ : List[Any] = encoding['pixel_values']
snake_case__ : Optional[int] = model(pixel_values.to(A ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
snake_case__ : Tuple = torch.tensor(
[[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]] )
snake_case__ : Union[str, Any] = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]] )
elif model_name == "deta-swin-large-o365":
snake_case__ : Optional[int] = torch.tensor(
[[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]] )
snake_case__ : Optional[Any] = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(A ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(A ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
processor.save_pretrained(A )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase_ , unittest.TestCase ):
_lowerCAmelCase =DanceDiffusionPipeline
_lowerCAmelCase =UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_lowerCAmelCase =PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
_lowerCAmelCase =UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_lowerCAmelCase =False
_lowerCAmelCase =False
def UpperCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
snake_case__ : str = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_lowerCamelCase , use_timestep_embedding=_lowerCamelCase , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
snake_case__ : Any = IPNDMScheduler()
snake_case__ : Any = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=0 ):
if str(_lowerCamelCase ).startswith('mps' ):
snake_case__ : Dict = torch.manual_seed(_lowerCamelCase )
else:
snake_case__ : Any = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
snake_case__ : List[str] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def UpperCAmelCase__ ( self : int ):
snake_case__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Dict = self.get_dummy_components()
snake_case__ : Optional[int] = DanceDiffusionPipeline(**_lowerCamelCase )
snake_case__ : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
snake_case__ : Tuple = self.get_dummy_inputs(_lowerCamelCase )
snake_case__ : Tuple = pipe(**_lowerCamelCase )
snake_case__ : Optional[Any] = output.audios
snake_case__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
snake_case__ : Optional[int] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCAmelCase__ ( self : int ):
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase__ ( self : Optional[int] ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase__ ( self : Optional[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ):
snake_case__ : Dict = torch_device
snake_case__ : Dict = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
snake_case__ : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Tuple = pipe(generator=_lowerCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
snake_case__ : int = output.audios
snake_case__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
snake_case__ : Optional[Any] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Tuple ):
snake_case__ : Optional[int] = torch_device
snake_case__ : Any = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
snake_case__ : Tuple = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Dict = pipe(generator=_lowerCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
snake_case__ : Dict = output.audios
snake_case__ : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
snake_case__ : Any = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 303
| 1
|
def _lowercase ( a__ : str ) -> list:
"""simple docstring"""
_UpperCamelCase = [0] * len(a__ )
for i in range(1 , len(a__ ) ):
# use last results for better performance - dynamic programming
_UpperCamelCase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_UpperCamelCase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_UpperCamelCase = j
return prefix_result
def _lowercase ( a__ : str ) -> int:
"""simple docstring"""
return max(prefix_function(a__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase ( self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase = 1
_UpperCamelCase = 3
_UpperCamelCase = (32, 32)
_UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def lowercase ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCamelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def lowercase ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
return CLIPTextModel(lowerCamelCase_ )
def lowercase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=lowerCamelCase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCamelCase = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_UpperCamelCase = unet.half()
_UpperCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="np" , ).images
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowercase ( self ) -> str:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , output_type="np" , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 147
| 1
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_A : Dict = ReformerTokenizer
_A : Optional[int] = ReformerTokenizerFast
_A : Union[str, Any] = True
_A : Union[str, Any] = False
_A : List[Any] = True
def lowerCamelCase(self ):
super().setUp()
A_ : str = ReformerTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase(self ):
A_ : Tuple = """<s>"""
A_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowerCAmelCase_ ) , 1000 )
def lowerCamelCase(self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase(self ):
if not self.test_rust_tokenizer:
return
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_rust_tokenizer()
A_ : Optional[Any] = """I was born in 92000, and this is falsé."""
A_ : int = tokenizer.tokenize(lowerCAmelCase_ )
A_ : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A_ : List[str] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
A_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A_ : List[Any] = self.get_rust_tokenizer()
A_ : List[Any] = tokenizer.encode(lowerCAmelCase_ )
A_ : Dict = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase(self , lowerCAmelCase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
# Simple input
A_ : Tuple = """This is a simple input"""
A_ : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
A_ : Optional[int] = ("""This is a simple input""", """This is a pair""")
A_ : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , )
def lowerCamelCase(self ):
pass
def lowerCamelCase(self ):
A_ : str = ReformerTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
A_ : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [285, 46, 10, 170, 382] , )
A_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A_ : Dict = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A_ : Tuple = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCamelCase(self ):
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def lowerCamelCase(self ):
A_ : Optional[int] = """Hello World!"""
A_ : List[Any] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def lowerCamelCase(self ):
A_ : Optional[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
A_ : int = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@require_torch
@slow
def lowerCamelCase(self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A_ : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
A_ : Optional[int] = """ """.join(lowerCAmelCase_ )
A_ : Dict = self.big_tokenizer.encode_plus(lowerCAmelCase_ , return_tensors="""pt""" )
A_ : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
A_ : Dict = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A_ : Any = encoded_sequence["""input_ids"""].shape
A_ : Optional[Any] = ReformerModel(lowerCAmelCase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase_ )
model(**lowerCAmelCase_ )
@slow
def lowerCamelCase(self ):
# fmt: off
A_ : Optional[Any] = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A_ : str = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=lowerCAmelCase_ , sequences=lowerCAmelCase_ , )
| 707
|
"""simple docstring"""
import math
def __UpperCamelCase ( snake_case__ , snake_case__ ):
if (
not isinstance(snake_case__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def __UpperCamelCase ( snake_case__ , snake_case__ ):
if (
not isinstance(snake_case__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 480
| 0
|
import math
def __magic_name__ ( __a : int ):
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = 0
while num > 0:
UpperCamelCase__ = num % 8
UpperCamelCase__ = octal + (remainder * math.floor(math.pow(10 , __a ) ))
counter += 1
UpperCamelCase__ = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"0o{int(__a )}"
def __magic_name__ ( ):
'''simple docstring'''
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
| 513
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase_ = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __magic_name__ ( __a : Union[str, Any] ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__a )
def __magic_name__ ( __a : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
UpperCamelCase__ = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(__a , id=__a )
| 513
| 1
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __snake_case ( self , **A_ ) -> int:
lowerCAmelCase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**A_ )
return config
def __snake_case ( self ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def __snake_case ( self ) -> List[Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=A_ )
def __snake_case ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __snake_case ( self ) -> List[Any]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=A_ )
def __snake_case ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=A_ )
def __snake_case ( self ) -> Optional[Any]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=A_ , prev_timestep=A_ )
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowerCAmelCase = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1e-5
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(variance_type="""learned_range""" )
lowerCAmelCase = scheduler_class(**A_ )
lowerCAmelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=A_ ) - -1_0.1_7_1_2_7_9_0 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=A_ ) - -5.7_9_9_8_0_5_2 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=A_ ) - -0.0_0_1_0_0_1_1 < 1e-5
def __snake_case ( self ) -> Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**A_ )
lowerCAmelCase = scheduler.timesteps
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(A_ ):
# 1. predict noise residual
lowerCAmelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(A_ ) )
lowerCAmelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3
def __snake_case ( self ) -> int:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**A_ )
scheduler.set_timesteps(25 )
lowerCAmelCase = scheduler.timesteps
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(A_ ):
# 1. predict noise residual
lowerCAmelCase = model(A_ , A_ )
if i + 1 == timesteps.shape[0]:
lowerCAmelCase = None
else:
lowerCAmelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(
A_ , A_ , A_ , prev_timestep=A_ , generator=A_ ).prev_sample
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(A_ ) )
lowerCAmelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3
def __snake_case ( self ) -> Dict:
pass
def __snake_case ( self ) -> List[str]:
pass
| 344
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case:
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=32 , A_=3 , A_=4 , A_=[10, 20, 30, 40] , A_=[2, 2, 3, 2] , A_=True , A_=True , A_=37 , A_="gelu" , A_=10 , A_=0.0_2 , A_=["stage2", "stage3", "stage4"] , A_=[2, 3, 4] , A_=None , ) -> List[str]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = num_stages
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = num_labels
lowerCAmelCase = initializer_range
lowerCAmelCase = out_features
lowerCAmelCase = out_indices
lowerCAmelCase = scope
def __snake_case ( self ) -> str:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ) -> Union[str, Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __snake_case ( self , A_ , A_ , A_ ) -> Tuple:
lowerCAmelCase = ConvNextVaModel(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self , A_ , A_ , A_ ) -> str:
lowerCAmelCase = ConvNextVaForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , A_ , A_ , A_ ) -> List[Any]:
lowerCAmelCase = ConvNextVaBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase = None
lowerCAmelCase = ConvNextVaBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
def __snake_case ( self ) -> List[str]:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __snake_case( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Dict = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : Any = False
UpperCAmelCase : Any = False
UpperCAmelCase : Dict = False
UpperCAmelCase : List[str] = False
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = ConvNextVaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __snake_case ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self ) -> List[Any]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __snake_case ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __snake_case ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __snake_case ( self ) -> List[str]:
pass
def __snake_case ( self ) -> Any:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase = True
if model_class.__name__ in [
*get_values(A_ ),
*get_values(A_ ),
]:
continue
lowerCAmelCase = model_class(A_ )
model.to(A_ )
model.train()
lowerCAmelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
lowerCAmelCase = model(**A_ ).loss
loss.backward()
def __snake_case ( self ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase = False
lowerCAmelCase = True
if (
model_class.__name__
in [*get_values(A_ ), *get_values(A_ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase = model_class(A_ )
model.to(A_ )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
lowerCAmelCase = model(**A_ ).loss
loss.backward()
def __snake_case ( self ) -> str:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(A_ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A_ )
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __snake_case ( self ) -> Optional[Any]:
def check_hidden_states_output(A_ , A_ , A_ ):
lowerCAmelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(A_ , A_ ) )
lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def __snake_case ( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ConvNextVaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _snake_case ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A_ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = preprocessor(images=A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**A_ )
# verify the logits
lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCAmelCase = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 344
| 1
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(snake_case_ , snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , ["c"] )
self.assertEqual(snake_case_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , ["a", "c"] )
self.assertEqual(snake_case_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(snake_case_ , [0, 2] , snake_case_ )
self.assertEqual(snake_case_ , ["a", "c"] )
self.assertEqual(snake_case_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(snake_case_ , [-3, -1] , snake_case_ )
self.assertEqual(snake_case_ , ["a", "c"] )
self.assertEqual(snake_case_ , [-3, -1] )
def __A ( self ) -> Dict:
# Stage names must be set
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , snake_case_ )
# Out features must be a list
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(snake_case_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(snake_case_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def __A ( self ) -> List[Any]:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 426
|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def A__ ( A__ , A__ , **A__ ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = AutoConfig.from_pretrained(A__ , **A__ )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(A__ )
model.save_pretrained(A__ )
AutoTokenizer.from_pretrained(A__ ).save_pretrained(A__ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 426
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = embedding_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = MegatronBertModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = MegatronBertForMaskedLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForCausalLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = MegatronBertForNextSentencePrediction(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForPreTraining(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , next_sentence_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = MegatronBertForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = MegatronBertForTokenClassification(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_choices
__magic_name__ = MegatronBertForMultipleChoice(config=A )
model.to(A )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_a = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
# test_resize_embeddings = False
_a = False
def __A ( self , A , A , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class in get_values(A ):
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , hidden_size=37 )
def __A ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
return torch.tensor(
snake_case_ , dtype=torch.long , device=snake_case_ , )
a_ : List[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
__magic_name__ = os.path.join(os.environ['''MYDIR'''] , A )
__magic_name__ = MegatronBertModel.from_pretrained(A )
model.to(A )
model.half()
__magic_name__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__magic_name__ = model(A )[0]
__magic_name__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , A )
__magic_name__ = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
__magic_name__ = output[0, ii, jj]
__magic_name__ = expected[3 * ii + jj]
__magic_name__ = '''ii={} jj={} a={} b={}'''.format(A , A , A , A )
self.assertTrue(math.isclose(A , A , rel_tol=A , abs_tol=A ) , msg=A )
| 718
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12
| 678
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = IFInpaintingSuperResolutionPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {"latents"}
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _lowerCAmelCase ( self , _a , _a=0 ):
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self._test_save_load_local()
def _lowerCAmelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 543
|
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
| 0
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[Any] = [0 for i in range(r + 1 )]
# nc0 = 1
_A : Optional[Any] = 1
for i in range(1,n + 1 ):
# to compute current row from previous row.
_A : int = min(snake_case_,snake_case_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 54
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
from math import factorial
def SCREAMING_SNAKE_CASE__ ( snake_case_ = 2_0 ) -> int:
A__ : Tuple =2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
A__ : Optional[int] =n // 2
return int(factorial(lowercase_ ) / (factorial(lowercase_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__lowerCamelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 416
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def A_ ( ) -> int:
_snake_case : Optional[int] = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
_snake_case : Tuple = Dataset.from_dict(lowercase_ )
return dataset
class A (__UpperCAmelCase ):
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
_snake_case : int = get_dataset()
_snake_case : Dict = make_duplicate_clusters(lowercase_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __a ( self ) -> Tuple:
'''simple docstring'''
_snake_case : Tuple = get_dataset()
_snake_case , _snake_case : Optional[int] = deduplicate_dataset(lowercase_ )
self.assertEqual(len(lowercase_ ) , 2 )
print(lowercase_ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , lowercase_ )
| 326
| 0
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A : Optional[Any] = logging.getLogger(__name__)
if __name__ == "__main__":
__A : int = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
__A : Optional[Any] = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, "rb") as fp:
__A : Union[str, Any] = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
__A : Optional[Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
__A : int = [0] * args.vocab_size
for k, v in counter.items():
__A : Optional[int] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 714
|
from __future__ import annotations
from typing import Any
class lowercase_ :
def __init__( self: Tuple, _lowercase: int):
'''simple docstring'''
__lowerCAmelCase = num_of_nodes
__lowerCAmelCase = []
__lowerCAmelCase = {}
def _lowercase ( self: str, _lowercase: int, _lowercase: int, _lowercase: int):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight])
def _lowercase ( self: Optional[Any], _lowercase: int):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def _lowercase ( self: Any, _lowercase: int):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowerCAmelCase = self.find_component(_lowercase)
def _lowercase ( self: Tuple, _lowercase: list[int], _lowercase: int, _lowercase: int):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowerCAmelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase)
elif component_size[u_node] >= component_size[v_node]:
__lowerCAmelCase = self.find_component(_lowercase)
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase)
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
__lowerCAmelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = edge
__lowerCAmelCase = self.m_component[u]
__lowerCAmelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowerCAmelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase, _lowercase):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = edge
__lowerCAmelCase = self.m_component[u]
__lowerCAmelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase, _lowercase, _lowercase)
print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''')
num_of_components -= 1
__lowerCAmelCase = [-1] * self.m_num_of_nodes
print(f'''The total weight of the minimal spanning tree is: {mst_weight}''')
def UpperCAmelCase ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 334
| 0
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCamelCase = sys.version_info >= (3, 10)
def a__ ( _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Any=None ) -> int:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class _snake_case :
__A : int
__A : float
__A : str
__A : bool
@dataclass
class _snake_case :
__A : int =42
__A : str =field(default="toto" , metadata={"help": "help message"})
@dataclass
class _snake_case :
__A : bool =False
__A : bool =True
__A : Optional[bool] =None
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Optional[int] ="titi"
__A : str ="toto"
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : List[Any] ="titi"
__A : List[Any] ="toto"
__A : Optional[Any] =42
@dataclass
class _snake_case :
__A : BasicEnum ="toto"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = BasicEnum(self.foo )
@dataclass
class _snake_case :
__A : MixedTypeEnum ="toto"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = MixedTypeEnum(self.foo )
@dataclass
class _snake_case :
__A : Optional[int] =None
__A : Optional[float] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "help message"})
__A : Optional[str] =None
__A : Optional[List[str]] =list_field(default=[])
__A : Optional[List[int]] =list_field(default=[])
@dataclass
class _snake_case :
__A : List[int] =list_field(default=[])
__A : List[int] =list_field(default=[1, 2, 3])
__A : List[str] =list_field(default=["Hallo", "Bonjour", "Hello"])
__A : List[float] =list_field(default=[0.1, 0.2, 0.3])
@dataclass
class _snake_case :
__A : List[int] =field()
__A : str =field()
__A : BasicEnum =field()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = BasicEnum(self.required_enum )
@dataclass
class _snake_case :
__A : int
__A : "BasicEnum" =field()
__A : "Optional[bool]" =None
__A : "str" =field(default="toto" , metadata={"help": "help message"})
__A : "List[str]" =list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
@dataclass
class _snake_case :
__A : bool =False
__A : bool =True
__A : bool | None =None
@dataclass
class _snake_case :
__A : int | None =None
__A : float | None =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "help message"})
__A : str | None =None
__A : list[str] | None =list_field(default=[])
__A : list[int] | None =list_field(default=[])
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
self.assertEqual(len(a._actions ) ,len(b._actions ) )
for x, y in zip(a._actions ,b._actions ):
UpperCAmelCase_ : List[str] = {k: v for k, v in vars(_snake_case ).items() if k != "container"}
UpperCAmelCase_ : List[Any] = {k: v for k, v in vars(_snake_case ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" ,_snake_case ) and yy.get("choices" ,_snake_case ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](_snake_case ) ,yy["type"](_snake_case ) )
del xx["type"], yy["type"]
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--bar" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--baz" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--flag" ,type=_snake_case ,default=_snake_case ,const=_snake_case ,nargs="?" )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((UpperCAmelCase_) , ) : int = parser.parse_args_into_dataclasses(_snake_case ,look_for_args_file=_snake_case )
self.assertFalse(example.flag )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument("--foo" ,default=42 ,type=_snake_case )
expected.add_argument("--baz" ,default="toto" ,type=_snake_case ,help="help message" )
self.argparsersEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("--foo" ,type=_snake_case ,default=_snake_case ,const=_snake_case ,nargs="?" )
expected.add_argument("--baz" ,type=_snake_case ,default=_snake_case ,const=_snake_case ,nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" ,action="store_false" ,default=_snake_case ,dest="baz" )
expected.add_argument("--opt" ,type=_snake_case ,default=_snake_case )
UpperCAmelCase_ : Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_snake_case )
for dataclass_type in dataclass_types:
UpperCAmelCase_ : Union[str, Any] = HfArgumentParser(_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : List[str] = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : Any = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : List[str] = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Any = argparse.ArgumentParser()
expected.add_argument(
"--foo" ,default="toto" ,choices=["titi", "toto", 42] ,type=make_choice_type_function(["titi", "toto", 42] ) ,)
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = parser.parse_args([] )
self.assertEqual(args.foo ,"toto" )
UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto )
UpperCAmelCase_ : Dict = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo ,"titi" )
UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo ,42 )
UpperCAmelCase_ : Any = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo )
def UpperCamelCase__ ( self ):
@dataclass
class _snake_case :
__A : Literal["titi", "toto", 42] ="toto"
UpperCAmelCase_ : List[Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
"--foo" ,default="toto" ,choices=("titi", "toto", 42) ,type=make_choice_type_function(["titi", "toto", 42] ) ,)
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo ,"toto" )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo ,"titi" )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo ,42 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo_int" ,nargs="+" ,default=[] ,type=_snake_case )
expected.add_argument("--bar_int" ,nargs="+" ,default=[1, 2, 3] ,type=_snake_case )
expected.add_argument("--foo_str" ,nargs="+" ,default=["Hallo", "Bonjour", "Hello"] ,type=_snake_case )
expected.add_argument("--foo_float" ,nargs="+" ,default=[0.1, 0.2, 0.3] ,type=_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = parser.parse_args([] )
self.assertEqual(
_snake_case ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=["Hallo", "Bonjour", "Hello"] ,foo_float=[0.1, 0.2, 0.3] ) ,)
UpperCAmelCase_ : Tuple = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(_snake_case ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=["a", "b", "c"] ,foo_float=[0.1, 0.7] ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = argparse.ArgumentParser()
expected.add_argument("--foo" ,default=_snake_case ,type=_snake_case )
expected.add_argument("--bar" ,default=_snake_case ,type=_snake_case ,help="help message" )
expected.add_argument("--baz" ,default=_snake_case ,type=_snake_case )
expected.add_argument("--ces" ,nargs="+" ,default=[] ,type=_snake_case )
expected.add_argument("--des" ,nargs="+" ,default=[] ,type=_snake_case )
UpperCAmelCase_ : Dict = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_snake_case )
for dataclass_type in dataclass_types:
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,bar=_snake_case ,baz=_snake_case ,ces=[] ,des=[] ) )
UpperCAmelCase_ : List[str] = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(_snake_case ,Namespace(foo=12 ,bar=3.14 ,baz="42" ,ces=["a", "b", "c"] ,des=[1, 2, 3] ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = HfArgumentParser(_snake_case )
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("--required_list" ,nargs="+" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--required_str" ,type=_snake_case ,required=_snake_case )
expected.add_argument(
"--required_enum" ,type=make_choice_type_function(["titi", "toto"] ) ,choices=["titi", "toto"] ,required=_snake_case ,)
self.argparsersEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" ,type=_snake_case ,required=_snake_case )
expected.add_argument(
"--required_enum" ,type=make_choice_type_function(["titi", "toto"] ) ,choices=["titi", "toto"] ,required=_snake_case ,)
expected.add_argument("--opt" ,type=_snake_case ,default=_snake_case )
expected.add_argument("--baz" ,default="toto" ,type=_snake_case ,help="help message" )
expected.add_argument("--foo_str" ,nargs="+" ,default=["Hallo", "Bonjour", "Hello"] ,type=_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : List[str] = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
UpperCAmelCase_ : Any = parser.parse_dict(_snake_case )[0]
UpperCAmelCase_ : Dict = BasicExample(**_snake_case )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = HfArgumentParser(_snake_case )
UpperCAmelCase_ : str = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(_snake_case ,parser.parse_dict ,_snake_case ,allow_extra_keys=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Any = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(_snake_case ,"temp_json" )
os.mkdir(_snake_case )
with open(temp_local_path + ".json" ,"w+" ) as f:
json.dump(_snake_case ,_snake_case )
UpperCAmelCase_ : str = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
UpperCAmelCase_ : Union[str, Any] = BasicExample(**_snake_case )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = HfArgumentParser(_snake_case )
UpperCAmelCase_ : int = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Dict = os.path.join(_snake_case ,"temp_yaml" )
os.mkdir(_snake_case )
with open(temp_local_path + ".yaml" ,"w+" ) as f:
yaml.dump(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
UpperCAmelCase_ : str = BasicExample(**_snake_case )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = HfArgumentParser(_snake_case )
self.assertIsNotNone(_snake_case )
| 71
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 317
| 0
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A : Union[str, Any] = logging.getLogger(__name__)
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=_SCREAMING_SNAKE_CASE , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=_SCREAMING_SNAKE_CASE , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=_SCREAMING_SNAKE_CASE , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=_SCREAMING_SNAKE_CASE , default='''data/dump''' , help='''The dump file prefix.''' )
_UpperCAmelCase = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
_UpperCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
_UpperCAmelCase = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
_UpperCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
_UpperCAmelCase = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
_UpperCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
_UpperCAmelCase = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
_UpperCAmelCase = fp.readlines()
logger.info('''Start encoding''' )
logger.info(f'{len(_SCREAMING_SNAKE_CASE )} examples to process.' )
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = 1_0000
_UpperCAmelCase = time.time()
for text in data:
_UpperCAmelCase = f'{bos} {text.strip()} {sep}'
_UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
rslt.append(_SCREAMING_SNAKE_CASE )
iter += 1
if iter % interval == 0:
_UpperCAmelCase = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
_UpperCAmelCase = time.time()
logger.info('''Finished binarization''' )
logger.info(f'{len(_SCREAMING_SNAKE_CASE )} examples processed.' )
_UpperCAmelCase = f'{args.dump_file}.{args.tokenizer_name}.pickle'
_UpperCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
_UpperCAmelCase = [np.uintaa(_SCREAMING_SNAKE_CASE ) for d in rslt]
else:
_UpperCAmelCase = [np.intaa(_SCREAMING_SNAKE_CASE ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as handle:
pickle.dump(rslt_ , _SCREAMING_SNAKE_CASE , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 95
|
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__A : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase)
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , **__UpperCamelCase : Optional[Any] )->List[Any]:
super().__init__(**__UpperCamelCase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self : Optional[int] , __UpperCamelCase : Union[np.ndarray, bytes, str] , **__UpperCamelCase : Tuple )->List[str]:
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , **__UpperCamelCase : Any )->Union[str, Any]:
_UpperCAmelCase = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
_UpperCAmelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple="This is a sound of {}." )->int:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_UpperCAmelCase = requests.get(__UpperCamelCase ).content
else:
with open(__UpperCamelCase , '''rb''' ) as f:
_UpperCAmelCase = f.read()
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = ffmpeg_read(__UpperCamelCase , self.feature_extractor.sampling_rate )
if not isinstance(__UpperCamelCase , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
_UpperCAmelCase = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
_UpperCAmelCase = candidate_labels
_UpperCAmelCase = [hypothesis_template.format(__UpperCamelCase ) for x in candidate_labels]
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , return_tensors=self.framework , padding=__UpperCamelCase )
_UpperCAmelCase = [text_inputs]
return inputs
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Any:
_UpperCAmelCase = model_inputs.pop('''candidate_labels''' )
_UpperCAmelCase = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __UpperCamelCase ):
_UpperCAmelCase = text_inputs[0]
else:
# Batching case.
_UpperCAmelCase = text_inputs[0][0]
_UpperCAmelCase = self.model(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def lowercase__ ( self : List[str] , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = model_outputs.pop('''candidate_labels''' )
_UpperCAmelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
_UpperCAmelCase = logits.softmax(dim=0 )
_UpperCAmelCase = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
_UpperCAmelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__UpperCamelCase , __UpperCamelCase ) , key=lambda __UpperCamelCase : -x[0] )
]
return result
| 95
| 1
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a = logging.getLogger(__name__)
def UpperCamelCase_( __magic_name__ : torch.nn.Module , __magic_name__ : BnbQuantizationConfig , __magic_name__ : Union[str, os.PathLike] = None , __magic_name__ : Optional[Dict[str, Union[int, str, torch.device]]] = None , __magic_name__ : Optional[List[str]] = None , __magic_name__ : Optional[Dict[Union[int, str], Union[int, str]]] = None , __magic_name__ : Optional[Union[str, os.PathLike]] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = bnb_quantization_config.load_in_abit
_lowerCAmelCase :Union[str, Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
_lowerCAmelCase :List[Any] = []
# custom device map
if isinstance(__magic_name__ , __magic_name__ ) and len(device_map.keys() ) > 1:
_lowerCAmelCase :Optional[Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_lowerCAmelCase :str = get_keys_to_not_convert(__magic_name__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__magic_name__ )
_lowerCAmelCase :int = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_lowerCAmelCase :List[str] = []
_lowerCAmelCase :Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__magic_name__ )
# compatibility with peft
_lowerCAmelCase :Union[str, Any] = load_in_abit
_lowerCAmelCase :int = load_in_abit
_lowerCAmelCase :List[str] = get_parameter_device(__magic_name__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_lowerCAmelCase :List[str] = replace_with_bnb_layers(__magic_name__ , __magic_name__ , modules_to_not_convert=__magic_name__ )
# convert param to the right dtype
_lowerCAmelCase :Optional[Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_lowerCAmelCase :Dict = name.replace('.weight' , '' ).replace('.bias' , '' )
_lowerCAmelCase :Tuple = getattr(__magic_name__ , __magic_name__ , __magic_name__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__magic_name__ ):
param.to(__magic_name__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
_lowerCAmelCase :Dict = replace_with_bnb_layers(
__magic_name__ , __magic_name__ , modules_to_not_convert=__magic_name__ )
_lowerCAmelCase :Optional[Any] = get_quantized_model_device_map(
__magic_name__ , __magic_name__ , __magic_name__ , max_memory=__magic_name__ , no_split_module_classes=__magic_name__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_lowerCAmelCase :Any = True
_lowerCAmelCase :Tuple = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
__magic_name__ , __magic_name__ , __magic_name__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=__magic_name__ , offload_state_dict=__magic_name__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__magic_name__ , device_map=__magic_name__ , offload_dir=__magic_name__ )
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
_lowerCAmelCase :Union[str, Any] = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__magic_name__ , __magic_name__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_lowerCAmelCase :Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_lowerCAmelCase :List[str] = {}
_lowerCAmelCase :List[str] = special_dtypes
_lowerCAmelCase :Tuple = no_split_module_classes
_lowerCAmelCase :Optional[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_lowerCAmelCase :Optional[int] = get_balanced_memory(
__magic_name__ , low_zero=(device_map == 'balanced_low_0') , max_memory=__magic_name__ , **__magic_name__ , )
_lowerCAmelCase :Tuple = max_memory
_lowerCAmelCase :str = infer_auto_device_map(__magic_name__ , **__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ):
# check if don't have any quantized module on the cpu
_lowerCAmelCase :Dict = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_lowerCAmelCase :Tuple = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : str=None , __magic_name__ : int=None ):
"""simple docstring"""
if modules_to_not_convert is None:
_lowerCAmelCase :Dict = []
_lowerCAmelCase , _lowerCAmelCase :List[Any] = _replace_with_bnb_layers(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase_( __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Union[str, Any]=None , __magic_name__ : str=None , ):
"""simple docstring"""
_lowerCAmelCase :Any = False
for name, module in model.named_children():
if current_key_name is None:
_lowerCAmelCase :Dict = []
current_key_name.append(__magic_name__ )
if isinstance(__magic_name__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_lowerCAmelCase :Dict = '.'.join(__magic_name__ )
_lowerCAmelCase :Tuple = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_lowerCAmelCase :Tuple = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
_lowerCAmelCase :Any = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__magic_name__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_lowerCAmelCase :Tuple = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
_lowerCAmelCase :int = module.weight.data
if module.bias is not None:
_lowerCAmelCase :str = module.bias.data
bnb_module.requires_grad_(__magic_name__ )
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :int = True
if len(list(module.children() ) ) > 0:
_lowerCAmelCase , _lowerCAmelCase :Any = _replace_with_bnb_layers(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
with init_empty_weights():
_lowerCAmelCase :str = deepcopy(__magic_name__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
_lowerCAmelCase :Optional[Any] = find_tied_parameters(__magic_name__ )
# For compatibility with Accelerate < 0.18
if isinstance(__magic_name__ , __magic_name__ ):
_lowerCAmelCase :int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCAmelCase :Union[str, Any] = sum(__magic_name__ , [] )
_lowerCAmelCase :Any = len(__magic_name__ ) > 0
# Check if it is a base model
_lowerCAmelCase :List[Any] = False
if hasattr(__magic_name__ , 'base_model_prefix' ):
_lowerCAmelCase :str = not hasattr(__magic_name__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCAmelCase :Tuple = list(model.named_children() )
_lowerCAmelCase :Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCAmelCase :Tuple = set(__magic_name__ ) - set(__magic_name__ )
_lowerCAmelCase :Any = list(set(__magic_name__ ) ) + list(__magic_name__ )
# remove ".weight" from the keys
_lowerCAmelCase :Optional[int] = ['.weight', '.bias']
_lowerCAmelCase :Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCAmelCase :List[Any] = name.replace(__magic_name__ , '' )
filtered_module_names.append(__magic_name__ )
return filtered_module_names
def UpperCamelCase_( __magic_name__ : Any ):
"""simple docstring"""
for m in model.modules():
if isinstance(__magic_name__ , bnb.nn.Linearabit ):
return True
return False
def UpperCamelCase_( __magic_name__ : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def UpperCamelCase_( __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__magic_name__ , __magic_name__ , 0 , dtype=__magic_name__ , value=__magic_name__ )
_lowerCAmelCase :List[Any] = param_name
_lowerCAmelCase :Dict = model
if "." in tensor_name:
_lowerCAmelCase :List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
_lowerCAmelCase :int = getattr(__magic_name__ , __magic_name__ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
_lowerCAmelCase :Tuple = new_module
_lowerCAmelCase :Optional[Any] = splits[-1]
# offload weights
_lowerCAmelCase :Union[str, Any] = False
offload_weight(module._parameters[tensor_name] , __magic_name__ , __magic_name__ , index=__magic_name__ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __magic_name__ , index=__magic_name__ , )
else:
offload_weight(__magic_name__ , __magic_name__ , __magic_name__ , index=__magic_name__ )
offload_weight(__magic_name__ , param_name.replace('weight' , 'SCB' ) , __magic_name__ , index=__magic_name__ )
set_module_tensor_to_device(__magic_name__ , __magic_name__ , 'meta' , dtype=__magic_name__ , value=torch.empty(*param.size() ) )
| 687
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 687
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , lowercase__=1_000 , ) -> List[Any]:
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
SCREAMING_SNAKE_CASE : Any = range_bbox
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : Dict = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : Optional[int] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Dict = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Union[str, Any] = t
SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
SCREAMING_SNAKE_CASE : List[Any] = TFLayoutLMModel(config=lowercase__ )
SCREAMING_SNAKE_CASE : Any = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = model(lowercase__ , lowercase__ , token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase__ , lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = TFLayoutLMForMaskedLM(config=lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = TFLayoutLMForSequenceClassification(config=lowercase__ )
SCREAMING_SNAKE_CASE : str = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = TFLayoutLMForTokenClassification(config=lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> str:
SCREAMING_SNAKE_CASE : str = TFLayoutLMForQuestionAnswering(config=lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE
) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Union[str, Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
snake_case__ : str = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ : Union[str, Any] = False
snake_case__ : List[Any] = True
snake_case__ : int = 1_0
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : int = TFLayoutLMModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def _UpperCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@slow
def _UpperCamelCase ( self ) -> int:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = TFLayoutLMModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def _UpperCamelCase ( self ) -> Tuple:
pass
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
SCREAMING_SNAKE_CASE : Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
SCREAMING_SNAKE_CASE : Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
SCREAMING_SNAKE_CASE : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
# test the sequence output on [0, :3, :3]
SCREAMING_SNAKE_CASE : int = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1E-3 ) )
# test the pooled output on [1, :3]
SCREAMING_SNAKE_CASE : int = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowercase__ , atol=1E-3 ) )
@slow
def _UpperCamelCase ( self ) -> Any:
# initialize model with randomly initialized sequence classification head
SCREAMING_SNAKE_CASE : int = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
SCREAMING_SNAKE_CASE : str = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE : List[str] = model(
input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.loss
SCREAMING_SNAKE_CASE : int = (2,)
self.assertEqual(loss.shape , lowercase__ )
# test the shape of the logits
SCREAMING_SNAKE_CASE : Dict = outputs.logits
SCREAMING_SNAKE_CASE : Any = (2, 2)
self.assertEqual(logits.shape , lowercase__ )
@slow
def _UpperCamelCase ( self ) -> str:
# initialize model with randomly initialized token classification head
SCREAMING_SNAKE_CASE : Union[str, Any] = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
SCREAMING_SNAKE_CASE : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE : int = model(
input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
# test the shape of the logits
SCREAMING_SNAKE_CASE : int = outputs.logits
SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , lowercase__ )
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
# initialize model with randomly initialized token classification head
SCREAMING_SNAKE_CASE : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
SCREAMING_SNAKE_CASE : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE : List[str] = model(input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
# test the shape of the logits
SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , lowercase__ )
self.assertEqual(outputs.end_logits.shape , lowercase__ )
| 707
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase :int = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_lowerCAmelCase :str = {
"""allenai/led-base-16384""": 16_384,
}
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : Optional[Any] = VOCAB_FILES_NAMES
snake_case__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : int = LEDTokenizer
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="replace" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=False , lowercase__=True , **lowercase__ , ) -> str:
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : List[Any] = getattr(lowercase__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Dict = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : Dict = 'post_processor'
SCREAMING_SNAKE_CASE : Dict = getattr(self.backend_tokenizer , lowercase__ , lowercase__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Tuple = tuple(state['cls'] )
SCREAMING_SNAKE_CASE : str = False
if state.get('add_prefix_space' , lowercase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Dict = add_prefix_space
SCREAMING_SNAKE_CASE : Optional[int] = True
if state.get('trim_offsets' , lowercase__ ) != trim_offsets:
SCREAMING_SNAKE_CASE : Optional[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Optional[Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : str = getattr(lowercase__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE : Dict = component_class(**lowercase__ )
setattr(self.backend_tokenizer , lowercase__ , lowercase__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase ( self , lowercase__ ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else value
SCREAMING_SNAKE_CASE : Union[str, Any] = value
def _UpperCamelCase ( self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE : int = kwargs.get('is_split_into_words' , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE : Dict = kwargs.get('is_split_into_words' , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__=None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self , lowercase__ , lowercase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = PaddingStrategy.DO_NOT_PAD , lowercase__ = None , lowercase__ = None , ) -> dict:
SCREAMING_SNAKE_CASE : int = super()._pad(
encoded_inputs=lowercase__ , max_length=lowercase__ , padding_strategy=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : str = len(encoded_inputs['global_attention_mask'] ) != len(lowercase__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : Dict = len(lowercase__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Tuple = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 179
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29
|
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCAmelCase__ : str = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Union[str, Any] , **snake_case_ : Union[str, Any] ):
'''simple docstring'''
super().__init__(**snake_case_ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(snake_case_ )
def __magic_name__ ( self : Dict , **snake_case_ : Dict ):
'''simple docstring'''
snake_case__ : Tuple = {}
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = {}
# preprocess args
if "points_per_batch" in kwargs:
snake_case__ : Any = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
snake_case__ : List[str] = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
snake_case__ : int = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
snake_case__ : int = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
snake_case__ : List[str] = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
snake_case__ : Union[str, Any] = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
snake_case__ : int = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
snake_case__ : Optional[Any] = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
snake_case__ : List[str] = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
snake_case__ : int = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
snake_case__ : str = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
snake_case__ : Union[str, Any] = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : str , snake_case_ : Optional[int] , *snake_case_ : Any , snake_case_ : str=None , snake_case_ : List[Any]=None , **snake_case_ : List[Any] ):
'''simple docstring'''
return super().__call__(snake_case_ , *snake_case_ , num_workers=snake_case_ , batch_size=snake_case_ , **snake_case_ )
def __magic_name__ ( self : List[str] , snake_case_ : Any , snake_case_ : Optional[Any]=6_4 , snake_case_ : int = 0 , snake_case_ : float = 5_1_2 / 1_5_0_0 , snake_case_ : Optional[int] = 3_2 , snake_case_ : Optional[int] = 1 , ):
'''simple docstring'''
snake_case__ : Dict = load_image(snake_case_ )
snake_case__ : Optional[int] = self.image_processor.size['''longest_edge''']
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.image_processor.generate_crop_boxes(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self.image_processor(images=snake_case_ , return_tensors='''pt''' )
with self.device_placement():
if self.framework == "pt":
snake_case__ : List[str] = self.get_inference_context()
with inference_context():
snake_case__ : Dict = self._ensure_tensor_on_device(snake_case_ , device=self.device )
snake_case__ : Tuple = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
snake_case__ : str = image_embeddings
snake_case__ : Dict = grid_points.shape[1]
snake_case__ : int = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
'''To return all points at once, set points_per_batch to None''' )
for i in range(0 , snake_case_ , snake_case_ ):
snake_case__ : str = grid_points[:, i : i + points_per_batch, :, :]
snake_case__ : Optional[Any] = input_labels[:, i : i + points_per_batch]
snake_case__ : List[str] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def __magic_name__ ( self : List[str] , snake_case_ : List[str] , snake_case_ : Optional[Any]=0.8_8 , snake_case_ : Dict=0.9_5 , snake_case_ : List[str]=0 , snake_case_ : Dict=1 , ):
'''simple docstring'''
snake_case__ : Union[str, Any] = model_inputs.pop('''input_boxes''' )
snake_case__ : Union[str, Any] = model_inputs.pop('''is_last''' )
snake_case__ : List[str] = model_inputs.pop('''original_sizes''' ).tolist()
snake_case__ : int = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
snake_case__ : List[Any] = self.model(**snake_case_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
snake_case__ : Optional[int] = model_outputs['''pred_masks''']
snake_case__ : Optional[Any] = self.image_processor.post_process_masks(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , binarize=snake_case_ )
snake_case__ : str = model_outputs['''iou_scores''']
snake_case__ , snake_case__ , snake_case__ : Any = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def __magic_name__ ( self : List[str] , snake_case_ : Optional[int] , snake_case_ : List[str]=False , snake_case_ : int=False , snake_case_ : Tuple=0.7 , ):
'''simple docstring'''
snake_case__ : Tuple = []
snake_case__ : str = []
snake_case__ : Optional[int] = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
snake_case__ : Union[str, Any] = torch.cat(snake_case_ )
snake_case__ : Dict = torch.cat(snake_case_ )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = self.image_processor.post_process_for_mask_generation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case__ : Tuple = defaultdict(snake_case_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(snake_case_ )
snake_case__ : str = {}
if output_rle_mask:
snake_case__ : Union[str, Any] = rle_mask
if output_bboxes_mask:
snake_case__ : Union[str, Any] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 347
| 0
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
__lowerCamelCase: str
__lowerCamelCase: str = None
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
raise NotImplementedError
def lowerCAmelCase__ ( self : str , a : int , a : int , a : str , **a : Dict ):
'''simple docstring'''
raise NotImplementedError
def lowerCAmelCase__ ( self : Union[str, Any] , a : Dict ):
'''simple docstring'''
raise NotImplementedError
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] ):
'''simple docstring'''
return f"""`pip install {cls.pip_package or cls.name}`"""
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'optuna'
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_optuna_available()
def lowerCAmelCase__ ( self : str , a : Any , a : int , a : str , **a : Dict ):
'''simple docstring'''
return run_hp_search_optuna(a , a , a , **a )
def lowerCAmelCase__ ( self : Optional[int] , a : Dict ):
'''simple docstring'''
return default_hp_space_optuna(a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Any = 'ray'
__lowerCamelCase: Optional[Any] = '\'ray[tune]\''
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_ray_available()
def lowerCAmelCase__ ( self : List[str] , a : List[Any] , a : int , a : str , **a : List[str] ):
'''simple docstring'''
return run_hp_search_ray(a , a , a , **a )
def lowerCAmelCase__ ( self : Optional[int] , a : Tuple ):
'''simple docstring'''
return default_hp_space_ray(a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Dict = 'sigopt'
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_sigopt_available()
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any] , a : int , a : str , **a : int ):
'''simple docstring'''
return run_hp_search_sigopt(a , a , a , **a )
def lowerCAmelCase__ ( self : List[str] , a : Tuple ):
'''simple docstring'''
return default_hp_space_sigopt(a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'wandb'
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_wandb_available()
def lowerCAmelCase__ ( self : Dict , a : Optional[Any] , a : int , a : str , **a : Optional[Any] ):
'''simple docstring'''
return run_hp_search_wandb(a , a , a , **a )
def lowerCAmelCase__ ( self : List[Any] , a : Optional[Any] ):
'''simple docstring'''
return default_hp_space_wandb(a )
UpperCamelCase__ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : int = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
lowercase_ : List[str] = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
F"""{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 640
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 640
| 1
|
import math
def UpperCamelCase_ ( __a , __a ) -> Dict:
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(__a )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase : Tuple = """Enter the base and the power separated by a comma: """
UpperCamelCase , UpperCamelCase : str = map(int, input(prompt).split(""","""))
UpperCamelCase , UpperCamelCase : Tuple = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase : Any = res(xa, ya)
UpperCamelCase : List[str] = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 37
|
'''simple docstring'''
def UpperCAmelCase_ ( __lowerCamelCase : int ):
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
lowercase_ :Optional[int] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowercase_ :Optional[int] = 1
if upper_limit > 0:
lowercase_ :str = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 ,upper_limit + 1 ):
for j in range(__lowerCamelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
lowerCAmelCase : Optional[int] =int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 172
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__SCREAMING_SNAKE_CASE , max_perimeter + 1 ):
_UpperCamelCase =(base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__SCREAMING_SNAKE_CASE ):
_UpperCamelCase =int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _a (__SCREAMING_SNAKE_CASE = 1000 ):
"""simple docstring"""
_UpperCamelCase =pythagorean_triple(__SCREAMING_SNAKE_CASE )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 271
|
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =cva.getAffineTransform(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return cva.warpAffine(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (rows, cols) )
if __name__ == "__main__":
# read original image
__lowerCamelCase : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
__lowerCamelCase : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCamelCase , __lowerCamelCase : Any = gray_img.shape
# set different points to rotate image
__lowerCamelCase : Optional[Any] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__lowerCamelCase : Any = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__lowerCamelCase : int = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__lowerCamelCase : List[Any] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__lowerCamelCase : int = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCamelCase : Optional[Any] = plt.figure(1)
__lowerCamelCase : Tuple = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
| 271
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ : Tuple = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def SCREAMING_SNAKE_CASE ( __UpperCamelCase=None) -> Optional[Any]:
if subparsers is not None:
a = subparsers.add_parser("tpu-config" , description=_description)
else:
a = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description)
# Core arguments
a = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`.")
config_args.add_argument(
"--config_file" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=__UpperCAmelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=__UpperCAmelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
a = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=__UpperCAmelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it.")
if subparsers is not None:
parser.set_defaults(func=__UpperCAmelCase)
return parser
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> List[str]:
a = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__UpperCAmelCase):
a = load_config_from_file(args.config_file)
if not args.command_file and defaults.command_file is not None and not args.command:
a = defaults.command_file
if not args.command and defaults.commands is not None:
a = defaults.commands
if not args.tpu_name:
a = defaults.tpu_name
if not args.tpu_zone:
a = defaults.tpu_zone
if args.accelerate_version == "dev":
a = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
a = "accelerate -U"
elif isinstance(parse(args.accelerate_version) , __UpperCAmelCase):
a = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod.")
if args.command_file:
with open(args.command_file , "r") as f:
a = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __UpperCAmelCase):
a = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
a = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
a = "; ".join(__UpperCAmelCase)
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
a = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(__UpperCAmelCase)}''')
return
subprocess.run(__UpperCAmelCase)
print("Successfully setup pod.")
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
a = tpu_command_parser()
a = parser.parse_args()
tpu_command_launcher(__UpperCAmelCase)
| 515
|
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
a__ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
a__ = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE )
a__ = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE )
if self.isEnabledFor(SCREAMING_SNAKE_CASE ):
if self._should_log(SCREAMING_SNAKE_CASE ):
a__ , a__ = self.process(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.logger.log(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif in_order:
a__ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
a__ , a__ = self.process(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.logger.log(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
state.wait_for_everyone()
def __a ( __UpperCAmelCase , __UpperCAmelCase = None ):
if log_level is None:
a__ = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __UpperCAmelCase )
a__ = logging.getLogger(__UpperCAmelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__UpperCAmelCase , {} )
| 194
| 0
|
'''simple docstring'''
import torch
from torch import nn
class a__ ( nn.Module ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=1 , __lowercase=False ):
super().__init__()
__lowerCAmelCase = n_token
__lowerCAmelCase = d_embed
__lowerCAmelCase = d_proj
__lowerCAmelCase = cutoffs + [n_token]
__lowerCAmelCase = [0] + self.cutoffs
__lowerCAmelCase = div_val
__lowerCAmelCase = self.cutoffs[0]
__lowerCAmelCase = len(self.cutoffs ) - 1
__lowerCAmelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__lowerCAmelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__lowerCAmelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
__lowerCAmelCase = nn.ModuleList()
__lowerCAmelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowercase , __lowercase ) ) )
else:
self.out_projs.append(__lowercase )
self.out_layers.append(nn.Linear(__lowercase , __lowercase ) )
else:
for i in range(len(self.cutoffs ) ):
__lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowerCAmelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowercase , __lowercase ) ) )
self.out_layers.append(nn.Linear(__lowercase , r_idx - l_idx ) )
__lowerCAmelCase = keep_order
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase ):
if proj is None:
__lowerCAmelCase = nn.functional.linear(__lowercase , __lowercase , bias=__lowercase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__lowerCAmelCase = nn.functional.linear(__lowercase , proj.t().contiguous() )
__lowerCAmelCase = nn.functional.linear(__lowercase , __lowercase , bias=__lowercase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _snake_case (self , __lowercase , __lowercase=None , __lowercase=False ):
if labels is not None:
# Shift so that tokens < n predict n
__lowerCAmelCase = hidden[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
__lowerCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__lowerCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__lowerCAmelCase = self._compute_logit(__lowercase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__lowerCAmelCase = labels != -1_00
__lowerCAmelCase = torch.zeros_like(__lowercase , dtype=hidden.dtype , device=hidden.device )
__lowerCAmelCase = (
-nn.functional.log_softmax(__lowercase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__lowerCAmelCase = nn.functional.log_softmax(__lowercase , dim=-1 )
else:
# construct weights and biases
__lowerCAmelCase , __lowerCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowerCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
__lowerCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowerCAmelCase = self.out_layers[i].weight
__lowerCAmelCase = self.out_layers[i].bias
if i == 0:
__lowerCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__lowerCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowercase )
biases.append(__lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = weights[0], biases[0], self.out_projs[0]
__lowerCAmelCase = self._compute_logit(__lowercase , __lowercase , __lowercase , __lowercase )
__lowerCAmelCase = nn.functional.log_softmax(__lowercase , dim=1 )
if labels is None:
__lowerCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__lowerCAmelCase = torch.zeros_like(__lowercase , dtype=hidden.dtype , device=hidden.device )
__lowerCAmelCase = 0
__lowerCAmelCase = [0] + self.cutoffs
for i in range(len(__lowercase ) - 1 ):
__lowerCAmelCase , __lowerCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__lowerCAmelCase = (labels >= l_idx) & (labels < r_idx)
__lowerCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__lowerCAmelCase = labels.index_select(0 , __lowercase ) - l_idx
__lowerCAmelCase = head_logprob.index_select(0 , __lowercase )
__lowerCAmelCase = hidden.index_select(0 , __lowercase )
else:
__lowerCAmelCase = hidden
if i == 0:
if labels is not None:
__lowerCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__lowerCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = weights[i], biases[i], self.out_projs[i]
__lowerCAmelCase = self._compute_logit(__lowercase , __lowercase , __lowercase , __lowercase )
__lowerCAmelCase = nn.functional.log_softmax(__lowercase , dim=1 )
__lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__lowerCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__lowerCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__lowerCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowercase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _snake_case (self , __lowercase ):
if self.n_clusters == 0:
__lowerCAmelCase = self._compute_logit(__lowercase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowercase , dim=-1 )
else:
# construct weights and biases
__lowerCAmelCase , __lowerCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowerCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
__lowerCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowerCAmelCase = self.out_layers[i].weight
__lowerCAmelCase = self.out_layers[i].bias
if i == 0:
__lowerCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__lowerCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowercase )
biases.append(__lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = weights[0], biases[0], self.out_projs[0]
__lowerCAmelCase = self._compute_logit(__lowercase , __lowercase , __lowercase , __lowercase )
__lowerCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__lowerCAmelCase = nn.functional.log_softmax(__lowercase , dim=1 )
__lowerCAmelCase = [0] + self.cutoffs
for i in range(len(__lowercase ) - 1 ):
__lowerCAmelCase , __lowerCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__lowerCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = weights[i], biases[i], self.out_projs[i]
__lowerCAmelCase = self._compute_logit(__lowercase , __lowercase , __lowercase , __lowercase )
__lowerCAmelCase = nn.functional.log_softmax(__lowercase , dim=1 )
__lowerCAmelCase = head_logprob[:, -i] + tail_logprob_i
__lowerCAmelCase = logprob_i
return out
| 474
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 474
| 1
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
snake_case_ : Tuple = imread(r'digital_image_processing/image_data/lena_small.jpg')
snake_case_ : Dict = cvtColor(img, COLOR_BGR2GRAY)
def A__ ( ):
_UpperCamelCase : Any = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def A__ ( ):
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase__ , 1_1_0 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def A__ ( ):
_UpperCamelCase : Any = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def A__ ( ):
_UpperCamelCase : Union[str, Any] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_UpperCamelCase : List[str] = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def A__ ( ):
assert gg.gaussian_filter(UpperCAmelCase__ , 5 , sigma=0.9 ).all()
def A__ ( ):
# laplace diagonals
_UpperCamelCase : Optional[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_UpperCamelCase : Tuple = conv.img_convolve(UpperCAmelCase__ , UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def A__ ( ):
assert med.median_filter(UpperCAmelCase__ , 3 ).any()
def A__ ( ):
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def A__ ( ):
_UpperCamelCase : Dict = sp.make_sepia(UpperCAmelCase__ , 2_0 )
assert sepia.all()
def A__ ( UpperCAmelCase_ = "digital_image_processing/image_data/lena_small.jpg" ):
_UpperCamelCase : str = bs.Burkes(imread(UpperCAmelCase__ , 1 ) , 1_2_0 )
burkes.process()
assert burkes.output_img.any()
def A__ ( UpperCAmelCase_ = "digital_image_processing/image_data/lena_small.jpg" , ):
_UpperCamelCase : int = rs.NearestNeighbour(imread(UpperCAmelCase__ , 1 ) , 4_0_0 , 2_0_0 )
nn.process()
assert nn.output.any()
def A__ ( ):
_UpperCamelCase : Optional[int] = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
_UpperCamelCase : List[str] = imread(UpperCAmelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
_UpperCamelCase : Dict = 0
_UpperCamelCase : str = 0
_UpperCamelCase : Dict = image[x_coordinate][y_coordinate]
_UpperCamelCase : Union[str, Any] = lbp.get_neighbors_pixel(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_UpperCamelCase : Optional[int] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_UpperCamelCase : List[Any] = lbp.local_binary_value(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
assert lbp_image.any()
| 195
|
"""simple docstring"""
def _a ( UpperCAmelCase__ ) -> List[str]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
for i in range(n - 1 ):
for j in range(i + 1 , UpperCAmelCase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( UpperCAmelCase__ ) -> int:
if len(UpperCAmelCase__ ) <= 1:
return arr, 0
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) // 2
__SCREAMING_SNAKE_CASE = arr[0:mid]
__SCREAMING_SNAKE_CASE = arr[mid:]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _count_cross_inversions(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = 0
while i < len(UpperCAmelCase__ ) and j < len(UpperCAmelCase__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(UpperCAmelCase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(UpperCAmelCase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _a ( ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__SCREAMING_SNAKE_CASE = count_inversions_bf(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , UpperCAmelCase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__SCREAMING_SNAKE_CASE = count_inversions_bf(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , UpperCAmelCase__ )
# an empty list should also have zero inversions
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = count_inversions_bf(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 482
| 0
|
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(UpperCamelCase__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 703
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__A : Dict = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__A : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCamelCase_ ( ):
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
benchmark()
| 450
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 417
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowercase ( snake_case_ ):
lowercase = 'convbert'
def __init__( self : Union[str, Any] , snake_case : Tuple=3_0_5_2_2 , snake_case : List[Any]=7_6_8 , snake_case : Any=1_2 , snake_case : Optional[int]=1_2 , snake_case : Optional[int]=3_0_7_2 , snake_case : Tuple="gelu" , snake_case : Any=0.1 , snake_case : Tuple=0.1 , snake_case : Optional[Any]=5_1_2 , snake_case : str=2 , snake_case : Tuple=0.02 , snake_case : Any=1e-12 , snake_case : List[str]=1 , snake_case : Any=0 , snake_case : Tuple=2 , snake_case : Any=7_6_8 , snake_case : Any=2 , snake_case : Tuple=9 , snake_case : int=1 , snake_case : str=None , **snake_case : Dict , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case , )
UpperCamelCase_ : List[Any] = vocab_size
UpperCamelCase_ : Any = hidden_size
UpperCamelCase_ : int = num_hidden_layers
UpperCamelCase_ : Any = num_attention_heads
UpperCamelCase_ : List[Any] = intermediate_size
UpperCamelCase_ : str = hidden_act
UpperCamelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCamelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_ : Dict = max_position_embeddings
UpperCamelCase_ : Dict = type_vocab_size
UpperCamelCase_ : str = initializer_range
UpperCamelCase_ : Any = layer_norm_eps
UpperCamelCase_ : Union[str, Any] = embedding_size
UpperCamelCase_ : int = head_ratio
UpperCamelCase_ : Optional[Any] = conv_kernel_size
UpperCamelCase_ : Any = num_groups
UpperCamelCase_ : int = classifier_dropout
class _lowercase ( snake_case_ ):
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase_ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase_ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 417
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
def __init__( self : Optional[Any] , a_ : List[str] , a_ : Dict=13 , a_ : str=7 , a_ : int=True , a_ : Union[str, Any]=True , a_ : Union[str, Any]=False , a_ : Tuple=True , a_ : Any=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Optional[int]=4 , a_ : Tuple=37 , a_ : Dict="gelu" , a_ : Dict=0.1 , a_ : Any=0.1 , a_ : List[Any]=512 , a_ : Union[str, Any]=16 , a_ : Any=2 , a_ : List[str]=0.02 , a_ : Optional[Any]=3 , a_ : Union[str, Any]=4 , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[Any] ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A ( self : Any , a_ : Optional[int] , a_ : str , a_ : List[str] , a_ : Tuple , a_ : Union[str, Any] , a_ : Any ):
"""simple docstring"""
__snake_case = DistilBertModel(config=__A )
model.to(__A )
model.eval()
__snake_case = model(__A , __A )
__snake_case = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : List[Any] , a_ : str , a_ : int , a_ : int ):
"""simple docstring"""
__snake_case = DistilBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
__snake_case = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[str] , a_ : Tuple , a_ : Optional[int] , a_ : Optional[int] , a_ : Tuple , a_ : Dict , a_ : Any ):
"""simple docstring"""
__snake_case = DistilBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__snake_case = model(
__A , attention_mask=__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : int , a_ : str , a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[int] , a_ : List[Any] , a_ : Dict ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DistilBertForSequenceClassification(__A )
model.to(__A )
model.eval()
__snake_case = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Tuple , a_ : Dict , a_ : Optional[Any] , a_ : int , a_ : List[str] , a_ : Tuple , a_ : Any ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DistilBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
__snake_case = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[Any] , a_ : str , a_ : str , a_ : Dict , a_ : Optional[Any] , a_ : Any , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_choices
__snake_case = DistilBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = model(
__A , attention_mask=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
(__snake_case) = config_and_inputs
__snake_case = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = DistilBertModelTester(self )
__snake_case = ConfigTester(self , config_class=__A , dim=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__A )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__A )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__A )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__A )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = DistilBertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case = True
__snake_case = model_class(config=__A )
__snake_case = self._prepare_for_class(__A , __A )
__snake_case = torch.jit.trace(
__A , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , "traced_model.pt" ) )
__snake_case = torch.jit.load(os.path.join(__A , "traced_model.pt" ) , map_location=__A )
loaded(inputs_dict["input_ids"].to(__A ) , inputs_dict["attention_mask"].to(__A ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DistilBertModel.from_pretrained("distilbert-base-uncased" )
__snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case = model(__A , attention_mask=__A )[0]
__snake_case = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
__snake_case = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
| 703
|
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
| 0
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = math.inf , SCREAMING_SNAKE_CASE = -math.inf , SCREAMING_SNAKE_CASE = math.inf , SCREAMING_SNAKE_CASE = -math.inf , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 1_00 , SCREAMING_SNAKE_CASE = 0.01 , SCREAMING_SNAKE_CASE = 1 , ):
"""simple docstring"""
lowercase__ = False
lowercase__ = search_prob
lowercase__ = start_temperate
lowercase__ = []
lowercase__ = 0
lowercase__ = None
while not search_end:
lowercase__ = current_state.score()
if best_state is None or current_score > best_state.score():
lowercase__ = current_state
scores.append(SCREAMING_SNAKE_CASE )
iterations += 1
lowercase__ = None
lowercase__ = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowercase__ = random.randint(0 , len(SCREAMING_SNAKE_CASE ) - 1 ) # picking a random neighbor
lowercase__ = neighbors.pop(SCREAMING_SNAKE_CASE )
lowercase__ = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowercase__ = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowercase__ = picked_neighbor
else:
lowercase__ = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowercase__ = picked_neighbor
lowercase__ = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowercase__ = True
else:
lowercase__ = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
lowerCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
f"""{local_min.score()}"""
)
lowerCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
f"""{local_min.score()}"""
)
| 43
|
from typing import Any
def __A ( _A ):
"""simple docstring"""
if not input_list:
return []
__a = [input_list.count(_A ) for value in input_list]
__a = max(_A ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197
| 0
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = CustomTokenizer
pass
| 81
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
| 81
| 1
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__a = "src/transformers"
__a = "docs/source/en/tasks"
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case__ : List[str] = f.readlines()
# Find the start prompt.
snake_case__ : Union[str, Any] = 0
while not lines[start_index].startswith(_lowerCAmelCase ):
start_index += 1
start_index += 1
snake_case__ : Tuple = start_index
while not lines[end_index].startswith(_lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__a = direct_transformers_import(TRANSFORMERS_PATH)
__a = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__a = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
snake_case__ : List[str] = TASK_GUIDE_TO_MODELS[task_guide]
snake_case__ : Union[str, Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowerCAmelCase , set() )
snake_case__ : List[str] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> Tuple:
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = _find_text_in_file(
filename=os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
snake_case__ : List[Any] = get_model_list_for_task(_lowerCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
""" to fix this.""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__a = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 374
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a = 16
__a = 32
def __snake_case( _lowerCAmelCase , _lowerCAmelCase = 16 ) -> Optional[Any]:
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
snake_case__ : Optional[int] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : List[str] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
snake_case__ : Tuple = 8
else:
snake_case__ : int = None
return tokenizer.pad(
_lowerCAmelCase , padding="""longest""" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
snake_case__ : List[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
snake_case__ : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a = mocked_dataloaders # noqa: F811
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _lowerCAmelCase ) == "1":
snake_case__ : int = 2
# New Code #
snake_case__ : Any = int(args.gradient_accumulation_steps )
# Initialize accelerator
snake_case__ : Any = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : List[Any] = config["""lr"""]
snake_case__ : Optional[Any] = int(config["""num_epochs"""] )
snake_case__ : Union[str, Any] = int(config["""seed"""] )
snake_case__ : List[str] = int(config["""batch_size"""] )
snake_case__ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(_lowerCAmelCase )
snake_case__ , snake_case__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : Any = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
snake_case__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_lowerCAmelCase ):
snake_case__ : Any = model(**_lowerCAmelCase )
snake_case__ : str = output.loss
accelerator.backward(_lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : str = model(**_lowerCAmelCase )
snake_case__ : Optional[int] = outputs.logits.argmax(dim=-1 )
snake_case__ , snake_case__ : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
snake_case__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _lowerCAmelCase )
def __snake_case( ) -> List[str]:
snake_case__ : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_lowerCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
snake_case__ : Tuple = parser.parse_args()
snake_case__ : Dict = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 374
| 1
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__a : List[Any] = """examples/"""
__a : List[Any] = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
__a : int = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
__a : int = """README.md"""
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
with open(lowercase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase = f.read()
UpperCamelCase , UpperCamelCase = REPLACE_PATTERNS[pattern]
UpperCamelCase = replace.replace("VERSION" , lowercase_ )
UpperCamelCase = re_pattern.sub(lowercase_ , lowercase_ )
with open(lowercase_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(lowercase_ )
def __magic_name__ ( lowercase_ ) -> Tuple:
'''simple docstring'''
for folder, directories, fnames in os.walk(lowercase_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(lowercase_ , lowercase_ ) , lowercase_ , pattern="examples" )
def __magic_name__ ( lowercase_ , lowercase_=False ) -> List[str]:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowercase_ , lowercase_ , lowercase_ )
if not patch:
update_version_in_examples(lowercase_ )
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
UpperCamelCase = "🤗 Transformers currently provides the following architectures"
UpperCamelCase = "1. Want to contribute a new model?"
with open(lowercase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase = f.readlines()
# Find the start of the list.
UpperCamelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCamelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCamelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(lowercase_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowercase_ )
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
UpperCamelCase = f.read()
UpperCamelCase = REPLACE_PATTERNS["init"][0].search(lowercase_ ).groups()[0]
return packaging.version.parse(lowercase_ )
def __magic_name__ ( lowercase_=False ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCamelCase = default_version.base_version
elif patch:
UpperCamelCase = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
UpperCamelCase = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
UpperCamelCase = input(f'''Which version are you releasing? [{default_version}]''' )
if len(lowercase_ ) == 0:
UpperCamelCase = default_version
print(f'''Updating version to {version}.''' )
global_version_update(lowercase_ , patch=lowercase_ )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase = get_version()
UpperCamelCase = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
UpperCamelCase = current_version.base_version
# Check with the user we got that right.
UpperCamelCase = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(lowercase_ ) == 0:
UpperCamelCase = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(lowercase_ )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
__a : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 700
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
def __magic_name__ ( lowercase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )
if "model" in sd.keys():
UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCamelCase = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase_ )
UpperCamelCase = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCamelCase = sd.pop(lowercase_ )
UpperCamelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCamelCase = sd[key]
# We split QKV in separate Q,K,V
UpperCamelCase = key.replace(".qkv_proj." , ".q_proj." )
UpperCamelCase = key.replace(".qkv_proj." , ".k_proj." )
UpperCamelCase = key.replace(".qkv_proj." , ".v_proj." )
UpperCamelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCamelCase , UpperCamelCase , UpperCamelCase = torch.split(lowercase_ , depth // 3 , dim=0 )
UpperCamelCase = q
UpperCamelCase = k
UpperCamelCase = v
del sd[key]
return sd
@torch.no_grad()
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=None ) -> str:
'''simple docstring'''
UpperCamelCase = load_checkpoint(lowercase_ )
if config is not None:
UpperCamelCase = OPTConfig.from_pretrained(lowercase_ )
else:
UpperCamelCase = OPTConfig()
UpperCamelCase = OPTModel(lowercase_ ).half().eval()
model.load_state_dict(lowercase_ )
# Check results
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
__a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__a : Dict = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 414
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( A ):
__SCREAMING_SNAKE_CASE = (DDPMScheduler,)
def __snake_case( self , **A_ ):
_UpperCAmelCase : Union[str, Any] = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**A_ )
return config
def __snake_case( self ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A_ )
def __snake_case( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __snake_case( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __snake_case( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def __snake_case( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __snake_case( self ):
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __snake_case( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __snake_case( self ):
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=A_ )
def __snake_case( self ):
_UpperCAmelCase : str = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __snake_case( self ):
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**A_ )
_UpperCAmelCase : List[Any] = len(A_ )
_UpperCAmelCase : Tuple = self.dummy_model()
_UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter
_UpperCAmelCase : Dict = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
_UpperCAmelCase : str = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Any = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase : Any = pred_prev_sample
_UpperCAmelCase : Any = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __snake_case( self ):
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase : Union[str, Any] = scheduler_class(**A_ )
_UpperCAmelCase : str = len(A_ )
_UpperCAmelCase : str = self.dummy_model()
_UpperCAmelCase : Any = self.dummy_sample_deter
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
_UpperCAmelCase : Optional[Any] = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : str = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase : Optional[Any] = pred_prev_sample
_UpperCAmelCase : List[str] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __snake_case( self ):
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : int = scheduler_class(**A_ )
_UpperCAmelCase : Tuple = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
_UpperCAmelCase : Tuple = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
_UpperCAmelCase : Optional[Any] = -1
else:
_UpperCAmelCase : List[Any] = timesteps[i + 1]
_UpperCAmelCase : Optional[Any] = scheduler.previous_timestep(A_ )
_UpperCAmelCase : Tuple = prev_t.item()
self.assertEqual(A_ , A_ )
def __snake_case( self ):
_UpperCAmelCase : Any = self.scheduler_classes[0]
_UpperCAmelCase : str = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**A_ )
_UpperCAmelCase : Union[str, Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(A_ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=A_ )
def __snake_case( self ):
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**A_ )
_UpperCAmelCase : Union[str, Any] = [1_00, 87, 50, 1, 0]
_UpperCAmelCase : Optional[int] = len(A_ )
with self.assertRaises(A_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ )
def __snake_case( self ):
_UpperCAmelCase : Dict = self.scheduler_classes[0]
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : List[str] = scheduler_class(**A_ )
_UpperCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=A_ )
| 643
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _SCREAMING_SNAKE_CASE ( A , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
__SCREAMING_SNAKE_CASE = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def __snake_case( self , A_=0 ):
_UpperCAmelCase : List[Any] = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(A_ ) )
_UpperCAmelCase : Any = torch.manual_seed(A_ )
_UpperCAmelCase : List[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __snake_case( self ):
_UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
_UpperCAmelCase : List[str] = pipe(**A_ ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Any = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
_UpperCAmelCase : Optional[Any] = pipe(**A_ ).images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Optional[Any] = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : str = self.get_dummy_inputs()
_UpperCAmelCase : List[Any] = pipe(**A_ ).images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Tuple = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_UpperCAmelCase : Union[str, Any] = pipe(**A_ ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : List[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __snake_case( self ):
_UpperCAmelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_UpperCAmelCase : str = pipe(**A_ ).images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Tuple = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __snake_case( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case( self ):
_UpperCAmelCase : List[str] = ort.SessionOptions()
_UpperCAmelCase : Union[str, Any] = False
return options
def __snake_case( self ):
_UpperCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase : Any = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
_UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Tuple = pipe(
prompt=A_ , image=A_ , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type="""np""" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : List[str] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __snake_case( self ):
_UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase : Optional[int] = init_image.resize((1_28, 1_28) )
_UpperCAmelCase : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
_UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Union[str, Any] = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = pipe(
prompt=A_ , image=A_ , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type="""np""" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : Dict = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Optional[Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 643
| 1
|
"""simple docstring"""
def _snake_case ( UpperCamelCase : int = 10**9 ):
UpperCAmelCase : int = 1
UpperCAmelCase : List[Any] = 2
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase : str = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 359
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Any = XLMTokenizer
__lowerCAmelCase : List[Any] = False
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCAmelCase : str = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase : List[Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] = """lower newer"""
UpperCAmelCase : Dict = """lower newer"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase : Optional[int] = """lower"""
UpperCAmelCase : Optional[int] = ["""low""", """er</w>"""]
UpperCAmelCase : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = tokens + ["""<unk>"""]
UpperCAmelCase : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
UpperCAmelCase : int = tokenizer.encode("""sequence builders""" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 359
| 1
|
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Any =0.0_0
lowercase : Tuple =0
for resistor in resistors:
if resistor <= 0:
lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__magic_name__ )
first_sum += 1 / float(__magic_name__ )
index += 1
return 1 / first_sum
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Optional[Any] =0.0_0
lowercase : int =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase : Tuple =f'''Resistor at index {index} has a negative value!'''
raise ValueError(__magic_name__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowerCAmelCase_ ( __A : list ):
'''simple docstring'''
if not postfix_notation:
return 0
snake_case: List[str] = {'+', '-', '*', '/'}
snake_case: list[Any] = []
for token in postfix_notation:
if token in operations:
snake_case , snake_case: int = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(__A ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ : Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = '''maskformer-swin'''
UpperCAmelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Any , lowercase__ : List[str]=224 , lowercase__ : Dict=4 , lowercase__ : List[Any]=3 , lowercase__ : int=96 , lowercase__ : int=[2, 2, 6, 2] , lowercase__ : Any=[3, 6, 12, 24] , lowercase__ : int=7 , lowercase__ : Optional[int]=4.0 , lowercase__ : Tuple=True , lowercase__ : Tuple=0.0 , lowercase__ : Dict=0.0 , lowercase__ : List[str]=0.1 , lowercase__ : Tuple="gelu" , lowercase__ : Optional[Any]=False , lowercase__ : Tuple=0.0_2 , lowercase__ : List[Any]=1e-5 , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , **lowercase__ : str , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowercase__ )
_UpperCamelCase : List[Any] = image_size
_UpperCamelCase : str = patch_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : int = embed_dim
_UpperCamelCase : int = depths
_UpperCamelCase : Tuple = len(lowercase__ )
_UpperCamelCase : Optional[int] = num_heads
_UpperCamelCase : int = window_size
_UpperCamelCase : List[Any] = mlp_ratio
_UpperCamelCase : int = qkv_bias
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCamelCase : Dict = drop_path_rate
_UpperCamelCase : List[Any] = hidden_act
_UpperCamelCase : Optional[int] = use_absolute_embeddings
_UpperCamelCase : Optional[Any] = layer_norm_eps
_UpperCamelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase : int = int(embed_dim * 2 ** (len(lowercase__ ) - 1) )
_UpperCamelCase : int = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(lowercase__ ) + 1 )]
_UpperCamelCase , _UpperCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=lowercase__ , out_indices=lowercase__ , stage_names=self.stage_names )
| 204
|
'''simple docstring'''
def __A ( UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : str = []
_UpperCamelCase : Optional[Any] = set({"(", "[", "{"} )
_UpperCamelCase : int = set({")", "]", "}"} )
_UpperCamelCase : Dict = {"{": "}", "[": "]", "(": ")"}
for i in range(len(UpperCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(UpperCAmelCase ) == 0 or (len(UpperCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(UpperCAmelCase ) == 0
def __A ( ) -> str:
'''simple docstring'''
_UpperCamelCase : Any = input("Enter sequence of brackets: " )
if is_balanced(UpperCAmelCase ):
print(UpperCAmelCase ,"is balanced" )
else:
print(UpperCAmelCase ,"is not balanced" )
if __name__ == "__main__":
main()
| 204
| 1
|
"""simple docstring"""
def snake_case ( lowerCAmelCase_ = 4000000 ) -> int:
_snake_case = []
_snake_case , _snake_case = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowerCAmelCase_ )
_snake_case , _snake_case = b, a + b
return sum(lowerCAmelCase_ )
if __name__ == "__main__":
print(F"{solution() = }")
| 103
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a = 16
__a = 32
def __snake_case( _lowerCAmelCase , _lowerCAmelCase = 16 ) -> Optional[Any]:
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
snake_case__ : Optional[int] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : List[str] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
snake_case__ : Tuple = 8
else:
snake_case__ : int = None
return tokenizer.pad(
_lowerCAmelCase , padding="""longest""" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
snake_case__ : List[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
snake_case__ : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a = mocked_dataloaders # noqa: F811
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _lowerCAmelCase ) == "1":
snake_case__ : int = 2
# New Code #
snake_case__ : Any = int(args.gradient_accumulation_steps )
# Initialize accelerator
snake_case__ : Any = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : List[Any] = config["""lr"""]
snake_case__ : Optional[Any] = int(config["""num_epochs"""] )
snake_case__ : Union[str, Any] = int(config["""seed"""] )
snake_case__ : List[str] = int(config["""batch_size"""] )
snake_case__ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(_lowerCAmelCase )
snake_case__ , snake_case__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : Any = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
snake_case__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_lowerCAmelCase ):
snake_case__ : Any = model(**_lowerCAmelCase )
snake_case__ : str = output.loss
accelerator.backward(_lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : str = model(**_lowerCAmelCase )
snake_case__ : Optional[int] = outputs.logits.argmax(dim=-1 )
snake_case__ , snake_case__ : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
snake_case__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _lowerCAmelCase )
def __snake_case( ) -> List[str]:
snake_case__ : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_lowerCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
snake_case__ : Tuple = parser.parse_args()
snake_case__ : Dict = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 374
| 0
|
from __future__ import annotations
__magic_name__ = '''Muhammad Umer Farooq'''
__magic_name__ = '''MIT'''
__magic_name__ = '''1.0.0'''
__magic_name__ = '''Muhammad Umer Farooq'''
__magic_name__ = '''contact@muhammadumerfarooq.me'''
__magic_name__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
def __init__( self , lowerCamelCase ):
super().__init__()
snake_case__ = []
snake_case__ = domain
def A_ ( self , lowerCamelCase , lowerCamelCase ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ = parse.urljoin(self.domain , lowerCamelCase )
self.urls.append(lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
return ".".join(get_sub_domain_name(__lowerCAmelCase ).split("." )[-2:] )
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
return parse.urlparse(__lowerCAmelCase ).netloc
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase = "https://github.com" ):
snake_case__ = get_domain_name(__lowerCAmelCase )
# Initialize the parser
snake_case__ = Parser(__lowerCAmelCase )
try:
# Open URL
snake_case__ = requests.get(__lowerCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ = requests.get(__lowerCAmelCase )
# Get the valid email.
snake_case__ = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__lowerCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__lowerCAmelCase )
if __name__ == "__main__":
__magic_name__ = emails_from_url('''https://github.com''')
print(F'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
| 530
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
snake_case__ = ksize + 1
snake_case__ = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__lowerCAmelCase ):
for x in range(__lowerCAmelCase ):
# distance from center
snake_case__ = x - ksize // 2
snake_case__ = y - ksize // 2
# degree to radiant
snake_case__ = theta / 180 * np.pi
snake_case__ = np.cos(_theta )
snake_case__ = np.sin(_theta )
# get kernel x
snake_case__ = cos_theta * px + sin_theta * py
# get kernel y
snake_case__ = -sin_theta * px + cos_theta * py
# fill kernel
snake_case__ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__magic_name__ = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
__magic_name__ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__magic_name__ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__magic_name__ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__magic_name__ = out / out.max() * 255
__magic_name__ = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
| 530
| 1
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_a : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class _lowercase ( __lowercase ):
def __init__( self : Any , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , SCREAMING_SNAKE_CASE_ : Union[str, List[str], "Image", List["Image"]] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> List[str]:
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def a ( self : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[Any]="This is a photo of {}." ) -> Dict:
__snake_case = load_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(SCREAMING_SNAKE_CASE_ ) for x in candidate_labels]
__snake_case = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE_ )
__snake_case = [text_inputs]
return inputs
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Union[str, Any]:
__snake_case = model_inputs.pop('candidate_labels' )
__snake_case = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , SCREAMING_SNAKE_CASE_ ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
__snake_case = model_outputs.pop('candidate_labels' )
__snake_case = model_outputs['logits'][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
__snake_case = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , key=lambda SCREAMING_SNAKE_CASE_ : -x[0] )
]
return result
| 56
|
def __lowerCAmelCase ( _A ,_A ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_lowercase = str(bin(_A ) )[2:] # remove the leading "0b"
_lowercase = str(bin(_A ) )[2:]
_lowercase = max(len(_A ) ,len(_A ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_A ) ,b_binary.zfill(_A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 398
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681
|
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 681
| 1
|
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 369
|
import cva
import numpy as np
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase : float , lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
if k in (0.04, 0.06):
__lowerCAmelCase : List[Any] = k
__lowerCAmelCase : str = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Dict ) -> str:
"""simple docstring"""
return str(self.k )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = cva.imread(lowerCAmelCase , 0 )
__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = img.shape
__lowerCAmelCase : list[list[int]] = []
__lowerCAmelCase : Dict = img.copy()
__lowerCAmelCase : Any = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB )
__lowerCAmelCase ,__lowerCAmelCase : List[str] = np.gradient(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = dx**2
__lowerCAmelCase : Dict = dy**2
__lowerCAmelCase : Any = dx * dy
__lowerCAmelCase : Dict = 0.04
__lowerCAmelCase : List[str] = self.window_size // 2
for y in range(lowerCAmelCase , h - offset ):
for x in range(lowerCAmelCase , w - offset ):
__lowerCAmelCase : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase : List[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase : Tuple = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase : Optional[Any] = (wxx * wyy) - (wxy**2)
__lowerCAmelCase : List[Any] = wxx + wyy
__lowerCAmelCase : int = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCAmelCase = HarrisCorner(0.04, 3)
__UpperCAmelCase , __UpperCAmelCase = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 651
| 0
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a__ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowercase__ :Optional[int] , lowercase__ :int = 13 , lowercase__ :int = 64 , lowercase__ :int = 2 , lowercase__ :int = 3 , lowercase__ :int = 3 , lowercase__ :bool = True , lowercase__ :bool = True , lowercase__ :int = 128 , lowercase__ :List[str]=[16, 32, 64, 128] , lowercase__ :int = 7 , lowercase__ :int = 4 , lowercase__ :int = 37 , lowercase__ :str = "gelu" , lowercase__ :float = 0.1 , lowercase__ :float = 0.1 , lowercase__ :int = 10 , lowercase__ :float = 0.02 , lowercase__ :int = 2 , lowercase__ :int = 1 , lowercase__ :int = 128 , lowercase__ :List[int] = [2, 2, 2, 2] , lowercase__ :int = 2 , lowercase__ :int = 2 , ):
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = encoder_stride
lowercase = num_attention_outputs
lowercase = embed_dim
lowercase = embed_dim + 1
lowercase = resolution
lowercase = depths
lowercase = hidden_sizes
lowercase = dim
lowercase = mlp_expansion_ratio
def __UpperCAmelCase ( self :int ):
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self :Any ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def __UpperCAmelCase ( self :Any , lowercase__ :Union[str, Any] , lowercase__ :str , lowercase__ :Dict ):
lowercase = TFEfficientFormerModel(config=lowercase__ )
lowercase = model(lowercase__ , training=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self :Union[str, Any] , lowercase__ :Optional[int] , lowercase__ :List[str] , lowercase__ :str ):
lowercase = self.type_sequence_label_size
lowercase = TFEfficientFormerForImageClassification(lowercase__ )
lowercase = model(lowercase__ , labels=lowercase__ , training=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase = 1
lowercase = TFEfficientFormerForImageClassification(lowercase__ )
lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self :Optional[Any] ):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a__ ( _snake_case , _snake_case , unittest.TestCase ):
"""simple docstring"""
A__ : int = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
A__ : Dict = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
A__ : Dict = False
A__ : Any = False
A__ : str = False
A__ : List[Any] = False
A__ : int = False
def __UpperCAmelCase ( self :Dict ):
lowercase = TFEfficientFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 )
def __UpperCAmelCase ( self :Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def __UpperCAmelCase ( self :str ):
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def __UpperCAmelCase ( self :List[str] ):
pass
def __UpperCAmelCase ( self :List[Any] ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(lowercase__ )
lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase__ )
def __UpperCAmelCase ( self :str ):
def check_hidden_states_output(lowercase__ :Tuple , lowercase__ :int , lowercase__ :int ):
lowercase = model_class(lowercase__ )
lowercase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) , training=lowercase__ )
lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase__ ) , lowercase__ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
lowercase = seq_length * self.model_tester.chunk_length
else:
lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowercase = outputs.decoder_hidden_states
self.asseretIsInstance(lowercase__ , (list, tuple) )
self.assertEqual(len(lowercase__ ) , lowercase__ )
lowercase = getattr(self.model_tester , 'seq_length' , lowercase__ )
lowercase = getattr(self.model_tester , 'decoder_seq_length' , lowercase__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def __UpperCAmelCase ( self :Tuple , lowercase__ :Tuple , lowercase__ :List[str] , lowercase__ :Union[str, Any]=False ):
lowercase = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCAmelCase ( self :Any ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def __UpperCAmelCase ( self :Tuple ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__ )
def __UpperCAmelCase ( self :str ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def __UpperCAmelCase ( self :Any ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = TFEfficientFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def __UpperCAmelCase ( self :Tuple ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
lowercase = getattr(self.model_tester , 'seq_length' , lowercase__ )
lowercase = getattr(self.model_tester , 'encoder_seq_length' , lowercase__ )
lowercase = getattr(self.model_tester , 'key_length' , lowercase__ )
lowercase = getattr(self.model_tester , 'chunk_length' , lowercase__ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowercase = True
lowercase = False
lowercase = True
lowercase = model_class(lowercase__ )
lowercase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) , training=lowercase__ )
lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase = True
lowercase = model_class(lowercase__ )
lowercase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) , training=lowercase__ )
lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def __UpperCAmelCase ( self :str ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowercase = model_class(lowercase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowercase = model(lowercase__ )
self.assertTrue(outputs_dict is not None )
def __snake_case ( ):
"""simple docstring"""
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self :Tuple ):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self :int ):
lowercase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=lowercase__ , return_tensors='tf' )
# forward pass
lowercase = model(**lowercase__ , training=lowercase__ )
# verify the logits
lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
lowercase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self :Optional[Any] ):
lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=lowercase__ , return_tensors='tf' )
# forward pass
lowercase = model(**lowercase__ , training=lowercase__ )
# verify the logits
lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
lowercase = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
| 314
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 314
| 1
|
from __future__ import annotations
from collections import deque
class A__ :
"""simple docstring"""
def __init__( self : Any , lowerCamelCase__ : list[str] ):
a__ : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(lowerCamelCase__ )
self.set_fail_transitions()
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : str ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _UpperCamelCase( self : Any , lowerCamelCase__ : str ):
a__ : List[str] = 0
for character in keyword:
a__ : Tuple = self.find_next_state(lowerCamelCase__ , lowerCamelCase__ )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
a__ : Union[str, Any] = len(self.adlist ) - 1
else:
a__ : List[str] = next_state
self.adlist[current_state]["output"].append(lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCamelCase__ )
a__ : Tuple = 0
while q:
a__ : str = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCamelCase__ )
a__ : Tuple = self.adlist[r]["fail_state"]
while (
self.find_next_state(lowerCamelCase__ , self.adlist[child]["value"] ) is None
and state != 0
):
a__ : List[Any] = self.adlist[state]["fail_state"]
a__ : Optional[int] = self.find_next_state(
lowerCamelCase__ , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
a__ : Dict = 0
a__ : Union[str, Any] = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str ):
a__ : dict = {} # returns a dict with keywords and list of its occurrences
a__ : Tuple = 0
for i in range(len(lowerCamelCase__ ) ):
while (
self.find_next_state(lowerCamelCase__ , string[i] ) is None
and current_state != 0
):
a__ : Union[str, Any] = self.adlist[current_state]["fail_state"]
a__ : Optional[Any] = self.find_next_state(lowerCamelCase__ , string[i] )
if next_state is None:
a__ : str = 0
else:
a__ : int = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
a__ : Optional[Any] = []
result[key].append(i - len(lowerCamelCase__ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
|
"""simple docstring"""
from __future__ import annotations
lowercase_ = list[tuple[int, int]]
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , ):
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
def __UpperCAmelCase ( self ):
__a = abs(self.pos_x - self.goal_x )
__a = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _a ):
return self.f_cost < other.f_cost
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _a )
__a = [self.start]
__a = []
__a = False
def __UpperCAmelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__a = True
return self.retrace_path(_a )
self.closed_nodes.append(_a )
__a = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
if not self.reached:
return [self.start.pos]
return None
def __UpperCAmelCase ( self , _a ):
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def __UpperCAmelCase ( self , _a ):
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__a = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase_ = GreedyBestFirst(init, goal)
lowercase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase_ = 2
for elem in grid:
print(elem)
| 695
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = KandinskyVaaImgaImgPipeline
lowerCamelCase_ = ['image_embeds', 'negative_image_embeds', 'image']
lowerCamelCase_ = [
'image_embeds',
'negative_image_embeds',
'image',
]
lowerCamelCase_ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase_ = False
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 100
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : List[str] ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase : Optional[int] =UNetaDConditionModel(**UpperCAmelCase__ )
return model
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : int =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Optional[int] =self.dummy_unet
lowercase : Any =self.dummy_movq
lowercase : Dict ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase : Optional[int] =DDIMScheduler(**UpperCAmelCase__ )
lowercase : int ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=0 ):
'''simple docstring'''
lowercase : List[Any] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : List[str] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase__ )
# create init_image
lowercase : List[str] =floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Any =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((256, 256) )
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : Optional[Any] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : Optional[Any] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Tuple ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] ='''cpu'''
lowercase : int =self.get_dummy_components()
lowercase : List[str] =self.pipeline_class(**UpperCAmelCase__ )
lowercase : Optional[int] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[str] =pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) )
lowercase : Optional[int] =output.images
lowercase : Optional[Any] =pipe(
**self.get_dummy_inputs(UpperCAmelCase__ ) , return_dict=UpperCAmelCase__ , )[0]
lowercase : Optional[int] =image[0, -3:, -3:, -1]
lowercase : Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : str =np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
lowercase : List[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase : List[str] ='''A red cartoon frog, 4k'''
lowercase : Any =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase__ )
lowercase : List[str] =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowercase : Optional[int] =pipeline.to(UpperCAmelCase__ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[Any] =torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase , lowercase : Any =pipe_prior(
UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowercase : str =pipeline(
image=UpperCAmelCase__ , image_embeds=UpperCAmelCase__ , negative_image_embeds=UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
lowercase : Optional[int] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase__ , UpperCAmelCase__ )
| 708
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Tuple =HfArgumentParser(__magic_name__ )
lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0]
lowercase : Any =TensorFlowBenchmark(args=__magic_name__ )
try:
lowercase : List[Any] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] )
lowercase : Optional[Any] =''''''
lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] )
lowercase : Optional[Any] =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ )
raise ValueError(__magic_name__ )
benchmark.run()
if __name__ == "__main__":
main()
| 88
| 0
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase_ ( _UpperCAmelCase :str , _UpperCAmelCase :str , _UpperCAmelCase :Optional[int] , _UpperCAmelCase :Optional[int] ) -> List[str]:
'''simple docstring'''
A_ = s.rsplit(_UpperCAmelCase , _UpperCAmelCase )
return new.join(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase :int ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase_ ( _UpperCAmelCase :List[str] ) -> Union[str, Any]:
'''simple docstring'''
A_ = {}
A_ = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
A_ = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
A_ = key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
A_ = rreplace(_UpperCAmelCase , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
A_ = rreplace(_UpperCAmelCase , '''.b''' , '''.bias''' , 1 )
A_ = value.float()
return upgrade
@torch.no_grad()
def UpperCAmelCase_ ( _UpperCAmelCase :Dict , _UpperCAmelCase :Tuple , _UpperCAmelCase :int=None , _UpperCAmelCase :Optional[int]=True ) -> int:
'''simple docstring'''
from dall_e import Encoder
A_ = Encoder()
if os.path.exists(_UpperCAmelCase ):
A_ = torch.load(_UpperCAmelCase )
else:
A_ = torch.hub.load_state_dict_from_url(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
A_ = ckpt.state_dict()
encoder.load_state_dict(_UpperCAmelCase )
if config_path is not None:
A_ = FlavaImageCodebookConfig.from_pretrained(_UpperCAmelCase )
else:
A_ = FlavaImageCodebookConfig()
A_ = FlavaImageCodebook(_UpperCAmelCase ).eval()
A_ = encoder.state_dict()
A_ = upgrade_state_dict(_UpperCAmelCase )
hf_model.load_state_dict(_UpperCAmelCase )
A_ = hf_model.state_dict()
A_ = count_parameters(_UpperCAmelCase )
A_ = count_parameters(_UpperCAmelCase )
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_UpperCAmelCase )
else:
return hf_state_dict
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a__ : List[Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 188
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case__ :
'''simple docstring'''
__A = 42
__A = None
__A = None
_lowerCamelCase : str = namedtuple('CoinsDistribResult', 'moves excess')
def _lowerCAmelCase ( __magic_name__ :TreeNode | None ):
if root is None:
return 0
# Validation
def count_nodes(__magic_name__ :TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__magic_name__ :TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__magic_name__ ) != count_coins(__magic_name__ ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(__magic_name__ :TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase_, UpperCAmelCase_ = get_distrib(node.left )
UpperCAmelCase_, UpperCAmelCase_ = get_distrib(node.right )
UpperCAmelCase_ = 1 - left_distrib_excess
UpperCAmelCase_ = 1 - right_distrib_excess
UpperCAmelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(__magic_name__ )
+ abs(__magic_name__ )
)
UpperCAmelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__magic_name__ , __magic_name__ )
return get_distrib(__magic_name__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121
| 0
|
def A ( _lowercase = 1_000_000 ):
SCREAMING_SNAKE_CASE : Dict = set(range(3 , _lowercase , 2 ) )
primes.add(2 )
for p in range(3 , _lowercase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _lowercase , _lowercase ) ) )
SCREAMING_SNAKE_CASE : Optional[Any] = [float(_lowercase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_lowercase , limit + 1 , _lowercase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 0
|
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def _lowercase ( self ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _lowercase ( self ):
with self.assertRaises(UpperCAmelCase_ ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _lowercase ( self ):
with self.assertRaises(UpperCAmelCase_ ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _lowercase ( self ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _lowercase ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _lowercase ( self ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _lowercase ( self ):
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _lowercase ( self ):
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _lowercase ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _lowercase ( self ):
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _lowercase ( self ):
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _lowercase ( self ):
import PIL.Image
snake_case_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=UpperCAmelCase_ ) as mock_cast_to_python_objects:
snake_case_ = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
snake_case_ , snake_case_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , UpperCAmelCase_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __snake_case ( lowercase : Optional[int] , lowercase : int ):
snake_case_ = pa.BufferReader(lowercase ) if isinstance(lowercase , pa.Buffer ) else pa.memory_map(lowercase )
snake_case_ = pa.ipc.open_stream(lowercase )
snake_case_ = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __snake_case ( lowercase : Union[str, Any] , lowercase : int ):
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __snake_case ( ):
snake_case_ = pa.BufferOutputStream()
snake_case_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=lowercase , features=lowercase ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pa.ipc.open_stream(lowercase )
snake_case_ = f.read_all()
snake_case_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowercase )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __snake_case ( lowercase : Tuple ):
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase , writer_batch_size=lowercase , hash_salt="split_name" , check_duplicates=lowercase , ) as writer:
with pytest.raises(lowercase ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
snake_case_ , snake_case_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __snake_case ( lowercase : Dict ):
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase , writer_batch_size=lowercase , hash_salt="split_name" , check_duplicates=lowercase , ) as writer:
with pytest.raises(lowercase ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
snake_case_ , snake_case_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __snake_case ( lowercase : Union[str, Any] ):
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase , writer_batch_size=lowercase , hash_salt="split_name" , check_duplicates=lowercase , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __snake_case ( lowercase : List[str] , lowercase : Dict ):
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __snake_case ( lowercase : Dict , lowercase : List[Any] ):
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __snake_case ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __snake_case ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
snake_case_ = os.path.join(lowercase , "test.arrow" )
with ArrowWriter(path=lowercase , schema=pa.schema(lowercase ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(lowercase , 1 )
def __snake_case ( lowercase : Optional[Any] ):
if pa.types.is_list(lowercase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __snake_case ( lowercase : Optional[Any] , lowercase : Union[str, Any] ):
if isinstance(lst[0] , lowercase ):
change_first_primitive_element_in_list(lst[0] , lowercase )
else:
snake_case_ = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __snake_case ( lowercase : int , lowercase : List[str] , lowercase : Union[str, Any] ):
snake_case_ = pa.array(TypedSequence(lowercase , optimized_int_type=lowercase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __snake_case ( lowercase : Dict , lowercase : Union[str, Any] , lowercase : List[Any] ):
# in range
snake_case_ = pa.array(OptimizedTypedSequence(lowercase , col=lowercase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case_ = copy.deepcopy(lowercase )
snake_case_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowercase , lowercase )
snake_case_ = pa.array(OptimizedTypedSequence(lowercase , col=lowercase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __snake_case ( lowercase : Union[str, Any] , lowercase : int ):
snake_case_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=lowercase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __snake_case ( lowercase : Optional[int] ):
snake_case_ = "mock://dataset-train.arrow"
with ArrowWriter(path=lowercase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(lowercase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowercase )
def __snake_case ( ):
snake_case_ = pa.BufferOutputStream()
with ParquetWriter(stream=lowercase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pq.read_table(lowercase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __snake_case ( lowercase : str , lowercase : str ):
import PIL.Image
snake_case_ = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowercase , format="png" )
snake_case_ = pa.BufferOutputStream()
with ParquetWriter(
stream=lowercase , features=Features({"image": Image()} ) , embed_local_files=lowercase ) as writer:
writer.write({"image": image_path} )
writer.finalize()
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pq.read_table(lowercase )
snake_case_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , lowercase )
with open(lowercase , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __snake_case ( ):
snake_case_ = pa.schema([pa.field("col_1" , pa.string() , nullable=lowercase )] )
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(stream=lowercase ) as writer:
writer._build_writer(inferred_schema=lowercase )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 508
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase__ = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 508
| 1
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.2 , snake_case__=0.2 ):
lowerCAmelCase : Optional[Any] = bp_numa
lowerCAmelCase : List[str] = bp_numa
lowerCAmelCase : List[str] = bp_numa
lowerCAmelCase : List[Any] = conva_get[:2]
lowerCAmelCase : Union[str, Any] = conva_get[2]
lowerCAmelCase : Dict = size_pa
lowerCAmelCase : Dict = rate_w
lowerCAmelCase : List[Any] = rate_t
lowerCAmelCase : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCAmelCase : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCAmelCase : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
lowerCAmelCase : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase ( self , snake_case__ ):
# save model dict with pickle
lowerCAmelCase : Union[str, Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(snake_case__ , 'wb' ) as f:
pickle.dump(snake_case__ , snake_case__ )
print(f"Model saved: {save_path}" )
@classmethod
def lowercase ( cls , snake_case__ ):
# read saved model
with open(snake_case__ , 'rb' ) as f:
lowerCAmelCase : Tuple = pickle.load(snake_case__ ) # noqa: S301
lowerCAmelCase : Dict = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowerCAmelCase : Any = model_dic.get('size_pooling1' )
lowerCAmelCase : str = model_dic.get('num_bp1' )
lowerCAmelCase : Dict = model_dic.get('num_bp2' )
lowerCAmelCase : List[str] = model_dic.get('num_bp3' )
lowerCAmelCase : str = model_dic.get('rate_weight' )
lowerCAmelCase : List[str] = model_dic.get('rate_thre' )
# create model instance
lowerCAmelCase : int = CNN(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# modify model parameter
lowerCAmelCase : Optional[Any] = model_dic.get('w_conv1' )
lowerCAmelCase : Any = model_dic.get('wkj' )
lowerCAmelCase : int = model_dic.get('vji' )
lowerCAmelCase : str = model_dic.get('thre_conv1' )
lowerCAmelCase : Union[str, Any] = model_dic.get('thre_bp2' )
lowerCAmelCase : Dict = model_dic.get('thre_bp3' )
return conv_ins
def lowercase ( self , snake_case__ ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase ( self , snake_case__ ):
return round(snake_case__ , 3 )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
# convolution process
lowerCAmelCase : List[str] = convs[0]
lowerCAmelCase : str = convs[1]
lowerCAmelCase : Tuple = np.shape(snake_case__ )[0]
# get the data slice of original image data, data_focus
lowerCAmelCase : Any = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
lowerCAmelCase : int = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case__ )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : List[str] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case__ ):
lowerCAmelCase : Optional[int] = []
for i_focus in range(len(snake_case__ ) ):
lowerCAmelCase : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case__ ) )
lowerCAmelCase : List[Any] = np.asmatrix(snake_case__ ).reshape(
snake_case__ , snake_case__ )
data_featuremap.append(snake_case__ )
# expanding the data slice to One dimenssion
lowerCAmelCase : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case__ ) )
lowerCAmelCase : int = np.asarray(snake_case__ )
return focus_list, data_featuremap
def lowercase ( self , snake_case__ , snake_case__ , snake_case__="average_pool" ):
# pooling process
lowerCAmelCase : Optional[Any] = len(featuremaps[0] )
lowerCAmelCase : List[str] = int(size_map / size_pooling )
lowerCAmelCase : int = []
for i_map in range(len(snake_case__ ) ):
lowerCAmelCase : List[Any] = featuremaps[i_map]
lowerCAmelCase : Union[str, Any] = []
for i_focus in range(0 , snake_case__ , snake_case__ ):
for j_focus in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case__ ) )
lowerCAmelCase : Union[str, Any] = np.asmatrix(snake_case__ ).reshape(snake_case__ , snake_case__ )
featuremap_pooled.append(snake_case__ )
return featuremap_pooled
def lowercase ( self , snake_case__ ):
# expanding three dimension data to one dimension list
lowerCAmelCase : Optional[Any] = []
for i in range(len(snake_case__ ) ):
lowerCAmelCase : Optional[Any] = np.shape(data[i] )
lowerCAmelCase : Any = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCAmelCase : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case__ )
lowerCAmelCase : Optional[Any] = np.asarray(snake_case__ )
return data_expanded
def lowercase ( self , snake_case__ ):
# expanding matrix to one dimension list
lowerCAmelCase : List[str] = np.asarray(snake_case__ )
lowerCAmelCase : int = np.shape(snake_case__ )
lowerCAmelCase : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = []
lowerCAmelCase : Tuple = 0
for i_map in range(snake_case__ ):
lowerCAmelCase : Dict = np.ones((size_map, size_map) )
for i in range(0 , snake_case__ , snake_case__ ):
for j in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = pd_pool[
i_pool
]
lowerCAmelCase : Union[str, Any] = i_pool + 1
lowerCAmelCase : Tuple = np.multiply(
snake_case__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case__ )
return pd_all
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=bool ):
# model traning
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(snake_case__ )) )
print((' - - Shape: Teach_Data ', np.shape(snake_case__ )) )
lowerCAmelCase : Dict = 0
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[Any] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowerCAmelCase : Tuple = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(snake_case__ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCAmelCase : List[str] = np.asmatrix(datas_train[p] )
lowerCAmelCase : List[Any] = np.asarray(datas_teach[p] )
lowerCAmelCase : Union[str, Any] = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : Dict = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Dict = np.shape(snake_case__ )
lowerCAmelCase : Any = self._expand(snake_case__ )
lowerCAmelCase : Tuple = data_bp_input
lowerCAmelCase : Union[str, Any] = np.dot(snake_case__ , self.vji.T ) - self.thre_bpa
lowerCAmelCase : str = self.sig(snake_case__ )
lowerCAmelCase : Union[str, Any] = np.dot(snake_case__ , self.wkj.T ) - self.thre_bpa
lowerCAmelCase : Union[str, Any] = self.sig(snake_case__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCAmelCase : List[Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : str = np.multiply(
np.dot(snake_case__ , self.wkj ) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : List[str] = np.dot(snake_case__ , self.vji )
lowerCAmelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCAmelCase : Tuple = pd_conva_pooled.T.getA().tolist()
lowerCAmelCase : List[Any] = self._calculate_gradient_from_pool(
snake_case__ , snake_case__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCAmelCase : Tuple = self._expand_mat(pd_conva_all[k_conv] )
lowerCAmelCase : int = self.rate_weight * np.dot(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCAmelCase : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCAmelCase : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCAmelCase : str = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCAmelCase : str = self.thre_bpa - pd_k_all * self.rate_thre
lowerCAmelCase : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCAmelCase : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCAmelCase : int = rp + 1
lowerCAmelCase : int = error_count / patterns
all_mse.append(snake_case__ )
def draw_error():
lowerCAmelCase : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(snake_case__ , '+-' )
plt.plot(snake_case__ , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(snake_case__ , alpha=0.5 )
plt.show()
print('------------------Training Complished---------------------' )
print((' - - Training epoch: ', rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def lowercase ( self , snake_case__ ):
# model predict
lowerCAmelCase : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(snake_case__ )) )
for p in range(len(snake_case__ ) ):
lowerCAmelCase : Dict = np.asmatrix(datas_test[p] )
lowerCAmelCase : str = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : Any = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Union[str, Any] = self._expand(snake_case__ )
lowerCAmelCase : Optional[Any] = data_bp_input
lowerCAmelCase : Tuple = bp_outa * self.vji.T - self.thre_bpa
lowerCAmelCase : Any = self.sig(snake_case__ )
lowerCAmelCase : Any = bp_outa * self.wkj.T - self.thre_bpa
lowerCAmelCase : List[str] = self.sig(snake_case__ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCAmelCase : Optional[int] = [list(map(self.do_round , snake_case__ ) ) for each in produce_out]
return np.asarray(snake_case__ )
def lowercase ( self , snake_case__ ):
# return the data of image after convoluting process so we can check it out
lowerCAmelCase : Optional[Any] = np.asmatrix(snake_case__ )
lowerCAmelCase : List[Any] = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : int = self.pooling(snake_case__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 719
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=_A , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_A , default=10_00 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_A , type=_A , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_A , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
lowerCAmelCase : Any = parser.parse_args()
return args
def __UpperCamelCase ( _A : Optional[int] ) -> int:
"""simple docstring"""
def fn(_A : Tuple ):
return tokenizer(examples['text'] )
return fn
def __UpperCamelCase ( _A : int ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_data['input_ids'] ) ):
lowerCAmelCase : Optional[Any] = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
lowerCAmelCase : Any = tf.train.Features(feature=_A )
lowerCAmelCase : List[str] = tf.train.Example(features=_A )
lowerCAmelCase : Tuple = example.SerializeToString()
records.append(_A )
return records
def __UpperCamelCase ( _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase : Optional[Any] = min(len(_A ) , args.limit )
lowerCAmelCase : Dict = dataset.select(range(_A ) )
print(F"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
lowerCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase : Any = tokenize_function(_A )
lowerCAmelCase : Optional[int] = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_A : str ):
# Concatenate all texts.
lowerCAmelCase : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase : str = {
k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowerCAmelCase : List[Any] = dataset_tokenized.map(_A , batched=_A , batch_size=10_00 , num_proc=4 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = 0
for shard in range(0 , len(_A ) , args.shard_size ):
lowerCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase : List[str] = len(dataset_snapshot['input_ids'] )
lowerCAmelCase : Union[str, Any] = os.path.join(_A , F"dataset-{shard_count}-{records_containing}.tfrecord" )
lowerCAmelCase : List[Any] = get_serialized_examples(_A )
with tf.io.TFRecordWriter(_A ) as out_file:
for i in range(len(_A ) ):
lowerCAmelCase : Union[str, Any] = serialized_examples[i]
out_file.write(_A )
print('Wrote file {} containing {} records'.format(_A , _A ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
print(F"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
| 646
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = 1
__UpperCamelCase = 3
__UpperCamelCase = (32, 32)
__UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(A_ )
return image
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=32,)
return model
@property
def snake_case_ ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,)
return model
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,)
return CLIPTextModel(A_ )
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
def extract(*A_: Any,**A_: Dict ):
class __lowerCamelCase :
def __init__( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = torch.ones([0] )
def snake_case_ ( self: Optional[Any],A_: int ):
'''simple docstring'''
self.pixel_values.to(A_ )
return self
return Out()
return extract
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=A_,scheduler=A_,vae=A_,text_encoder=A_,tokenizer=A_,safety_checker=A_,feature_extractor=self.dummy_extractor,)
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = 'A painting of a squirrel eating a burger'
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt],generator=A_,guidance_scale=6.0,num_inference_steps=2,output_type='np' )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=6.0,num_inference_steps=2,output_type='np',return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=A_,scheduler=A_,vae=A_,text_encoder=A_,tokenizer=A_,safety_checker=A_,feature_extractor=self.dummy_extractor,)
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = 'A painting of a squirrel eating a burger'
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt],generator=A_,guidance_scale=6.0,num_inference_steps=2,output_type='np' )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=6.0,num_inference_steps=2,output_type='np',return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe',safety_checker=A_ )
assert isinstance(A_,A_ )
assert isinstance(pipe.scheduler,A_ )
assert pipe.safety_checker is None
__UpperCamelCase = pipe('example prompt',num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A_ )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(A_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe('example prompt',num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda','This test requires a GPU' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=A_,scheduler=A_,vae=A_,text_encoder=A_,tokenizer=A_,safety_checker=A_,feature_extractor=self.dummy_extractor,)
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = 'A painting of a squirrel eating a burger'
__UpperCamelCase = sd_pipe([prompt],num_inference_steps=2,output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5',safety_checker=A_ )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
__UpperCamelCase = 40_0366_0346
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=A_,num_inference_steps=50,output_type='np',width=512,height=512,sld_guidance_scale=0,)
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=A_,num_inference_steps=50,output_type='np',width=512,height=512,sld_guidance_scale=2000,sld_warmup_steps=7,sld_threshold=0.0_2_5,sld_momentum_scale=0.5,sld_mom_beta=0.7,)
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5',safety_checker=A_ )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = 'padme amidala taking a bath artwork, safe for work, no nudity'
__UpperCamelCase = 27_3497_1755
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=A_,num_inference_steps=50,output_type='np',width=512,height=512,sld_guidance_scale=0,)
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=A_,num_inference_steps=50,output_type='np',width=512,height=512,sld_guidance_scale=2000,sld_warmup_steps=7,sld_threshold=0.0_2_5,sld_momentum_scale=0.5,sld_mom_beta=0.7,)
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
__UpperCamelCase = 10_4435_5234
__UpperCamelCase = 12
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=A_,num_inference_steps=50,output_type='np',width=512,height=512,sld_guidance_scale=0,)
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = sd_pipe(
[prompt],generator=A_,guidance_scale=A_,num_inference_steps=50,output_type='np',width=512,height=512,sld_guidance_scale=2000,sld_warmup_steps=7,sld_threshold=0.0_2_5,sld_momentum_scale=0.5,sld_mom_beta=0.7,)
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : int = '''bert'''
def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
@property
def snake_case_ ( self):
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
| 155
| 0
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_lowercase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def __lowerCAmelCase ( _UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
lowerCamelCase__: Dict = k.replace(_UpperCamelCase , _UpperCamelCase )
return k
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> PegasusForConditionalGeneration:
'''simple docstring'''
lowerCamelCase__: str = DEFAULTS.copy()
cfg_kwargs.update(_UpperCamelCase )
lowerCamelCase__: Tuple = PegasusConfig(**_UpperCamelCase )
lowerCamelCase__: List[str] = PegasusForConditionalGeneration(_UpperCamelCase )
lowerCamelCase__: List[Any] = torch_model.model.state_dict()
lowerCamelCase__: List[str] = {}
for k, v in tf_weights.items():
lowerCamelCase__: str = rename_state_dict_key(_UpperCamelCase )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
lowerCamelCase__: str = v.T
lowerCamelCase__: Tuple = torch.tensor(_UpperCamelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
lowerCamelCase__: str = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
lowerCamelCase__: int = mapping["""shared.weight"""]
lowerCamelCase__: List[Any] = mapping["""shared.weight"""]
lowerCamelCase__: Any = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**_UpperCamelCase )
lowerCamelCase__ , lowerCamelCase__: Dict = torch_model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
lowerCamelCase__: Optional[Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def __lowerCAmelCase ( _UpperCamelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
lowerCamelCase__: str = tf.train.list_variables(_UpperCamelCase )
lowerCamelCase__: List[Any] = {}
lowerCamelCase__: List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(_UpperCamelCase , desc="""converting tf checkpoint to dict""" ):
lowerCamelCase__: Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCamelCase__: Any = tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
lowerCamelCase__: Any = array
return tf_weights
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] = Path(_UpperCamelCase ).parent.name
lowerCamelCase__: Any = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
lowerCamelCase__: Dict = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=_UpperCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_UpperCamelCase )
# convert model
lowerCamelCase__: str = get_tf_weights_as_numpy(_UpperCamelCase )
lowerCamelCase__: Tuple = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
lowerCamelCase__: List[str] = task_specific_params
lowerCamelCase__: Dict = convert_pegasus(_UpperCamelCase , _UpperCamelCase )
torch_model.save_pretrained(_UpperCamelCase )
lowerCamelCase__: str = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(_UpperCamelCase , Path(_UpperCamelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
_lowercase = parser.parse_args()
if args.save_dir is None:
_lowercase = Path(args.tf_ckpt_path).parent.name
_lowercase = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 242
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 242
| 1
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase_ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Any:
snake_case__ = GenerationConfig(
do_sample=lowerCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , config_name=lowerCAmelCase__ )
snake_case__ = GenerationConfig.from_pretrained(lowerCAmelCase__ , config_name=lowerCAmelCase__ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowerCAmelCase__ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
snake_case__ = AutoConfig.from_pretrained("""gpt2""" )
snake_case__ = GenerationConfig.from_model_config(lowerCAmelCase__ )
snake_case__ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
snake_case__ = GenerationConfig()
snake_case__ = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
snake_case__ = copy.deepcopy(lowerCAmelCase__ )
snake_case__ = generation_config.update(**lowerCAmelCase__ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCAmelCase__ , {"""foo""": """bar"""} )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
snake_case__ = GenerationConfig()
snake_case__ = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowerCAmelCase__ )
snake_case__ = GenerationConfig.from_pretrained(lowerCAmelCase__ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
snake_case__ = GenerationConfig.from_model_config(lowerCAmelCase__ )
assert not hasattr(lowerCAmelCase__ , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase_ ( self : Dict ) -> str:
snake_case__ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowerCAmelCase__ )
self.assertEqual(default_config.num_beams , 1 )
snake_case__ = GenerationConfig(
do_sample=lowerCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowerCAmelCase__ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
snake_case__ = GenerationConfig.from_pretrained(lowerCAmelCase__ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowerCAmelCase__ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase_ ( cls : Tuple ) -> Any:
snake_case__ = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
snake_case__ = GenerationConfig(
do_sample=lowerCAmelCase__ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
snake_case__ = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id="""test-generation-config""" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
snake_case__ = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
snake_case__ = GenerationConfig(
do_sample=lowerCAmelCase__ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
snake_case__ = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
snake_case__ = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
| 214
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : Union[str, Any] = logging.getLogger()
def _lowercase ( __UpperCamelCase : int ):
snake_case__ = {}
snake_case__ = os.path.join(__UpperCamelCase , """all_results.json""" )
if os.path.exists(__UpperCamelCase ):
with open(__UpperCamelCase , """r""" ) as f:
snake_case__ = json.load(__UpperCamelCase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
lowerCAmelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class SCREAMING_SNAKE_CASE__ ( __a ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
import xla_spawn
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowerCAmelCase__ , """argv""" , lowerCAmelCase__ ):
snake_case__ = time()
xla_spawn.main()
snake_case__ = time()
snake_case__ = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
import xla_spawn
snake_case__ = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(lowerCAmelCase__ , """argv""" , lowerCAmelCase__ ):
xla_spawn.main()
| 214
| 1
|
def snake_case_ ( snake_case ) -> str:
lowercase__: Optional[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
lowercase__: Dict = ''
lowercase__: Optional[int] = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
lowercase__: Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase__: Optional[Any] = [1 for i in range(len(snake_case ) )]
# for each character in new_string find corresponding palindromic string
lowercase__: str = 0
for j in range(len(snake_case ) ):
lowercase__: str = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase__: List[Any] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
lowercase__: Optional[int] = j - k + 1 # noqa: E741
lowercase__: List[Any] = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase__: Optional[Any] = length[j]
lowercase__: Dict = j
# create that string
lowercase__: Dict = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def snake_case_ ( snake_case = "" ) -> dict[str, float]:
lowercase__: Any = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
lowercase__: Optional[Any] = BeautifulSoup(requests.get(snake_case ).text , 'html.parser' )
lowercase__: Optional[int] = soup.find_all('td' , attrs='titleColumn' )
lowercase__: Optional[int] = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(snake_case , snake_case )
}
def snake_case_ ( snake_case = "IMDb_Top_250_Movies.csv" ) -> None:
lowercase__: Optional[Any] = get_imdb_top_aaa_movies()
with open(snake_case , 'w' , newline='' ) as out_file:
lowercase__: Optional[Any] = csv.writer(snake_case )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 335
| 0
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A_ : Dict = """src/transformers"""
A_ : str = """docs/source/en/tasks"""
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple , lowercase_: List[Any] ) -> List[Any]:
with open(_a , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A__ : List[str] = f.readlines()
# Find the start prompt.
A__ : Optional[Any] = 0
while not lines[start_index].startswith(_a ):
start_index += 1
start_index += 1
A__ : Dict = start_index
while not lines[end_index].startswith(_a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
A_ : Any = direct_transformers_import(TRANSFORMERS_PATH)
A_ : Any = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A_ : str = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def UpperCamelCase (lowercase_: Union[str, Any] ) -> Any:
A__ : Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide]
A__ : int = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_a , set() )
A__ : List[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase (lowercase_: Tuple , lowercase_: Union[str, Any]=False ) -> Tuple:
A__ : Optional[Any] = _find_text_in_file(
filename=os.path.join(_a , _a ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
A__ : Tuple = get_model_list_for_task(_a )
if current_list != new_list:
if overwrite:
with open(os.path.join(_a , _a ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
""" to fix this.""" )
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
A_ : Dict = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 456
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
def __init__( self ):
snake_case__ : List[str] =""""""
snake_case__ : List[Any] =""""""
snake_case__ : Optional[int] =[]
snake_case__ : Tuple =0
snake_case__ : Optional[Any] =2_5_6
snake_case__ : Optional[Any] =0
snake_case__ : str =0
snake_case__ : Any =0
snake_case__ : Dict =0
def lowercase__ ( self , a ):
snake_case__ : List[str] =cva.imread(a , 0 )
snake_case__ : Optional[Any] =copy.deepcopy(self.img )
snake_case__ , snake_case__ , snake_case__ : Any =plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
snake_case__ : Tuple =np.sum(a )
for i in range(len(a ) ):
snake_case__ : Union[str, Any] =x[i] / self.k
self.sk += prk
snake_case__ : Tuple =(self.L - 1) * self.sk
if self.rem != 0:
snake_case__ : int =int(last % last )
snake_case__ : List[Any] =int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(a )
snake_case__ : List[Any] =int(np.ma.count(self.img ) / self.img[1].size )
snake_case__ : Union[str, Any] =self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case__ : Optional[int] =self.img[j][i]
if num != self.last_list[num]:
snake_case__ : Any =self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def lowercase__ ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def lowercase__ ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
__lowerCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 385
| 0
|
from __future__ import annotations
class _a :
"""simple docstring"""
def __init__( self , _snake_case ):
_UpperCAmelCase =order
# a_{0} ... a_{k}
_UpperCAmelCase =[1.0] + [0.0] * order
# b_{0} ... b_{k}
_UpperCAmelCase =[1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_UpperCAmelCase =[0.0] * self.order
# y[n-1] ... y[n-k]
_UpperCAmelCase =[0.0] * self.order
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
if len(_snake_case ) < self.order:
_UpperCAmelCase =[1.0, *a_coeffs]
if len(_snake_case ) != self.order + 1:
_UpperCAmelCase =(
F"Expected a_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(_snake_case )}"
)
raise ValueError(_snake_case )
if len(_snake_case ) != self.order + 1:
_UpperCAmelCase =(
F"Expected b_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(_snake_case )}"
)
raise ValueError(_snake_case )
_UpperCAmelCase =a_coeffs
_UpperCAmelCase =b_coeffs
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
_UpperCAmelCase =0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_UpperCAmelCase =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_UpperCAmelCase =self.input_history[:-1]
_UpperCAmelCase =self.output_history[:-1]
_UpperCAmelCase =sample
_UpperCAmelCase =result
return result
| 592
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="ylacombe/bark-small"
_UpperCAmelCase =tempfile.mkdtemp()
_UpperCAmelCase ="en_speaker_1"
_UpperCAmelCase ="This is a test string"
_UpperCAmelCase ="speaker_embeddings_path.json"
_UpperCAmelCase ="speaker_embeddings"
def SCREAMING_SNAKE_CASE ( self , **_snake_case ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_tokenizer()
_UpperCAmelCase =BarkProcessor(tokenizer=_snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase =BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_UpperCAmelCase =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase =BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_UpperCAmelCase =35
_UpperCAmelCase =2
_UpperCAmelCase =8
_UpperCAmelCase ={
"semantic_prompt": np.ones(_snake_case ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_UpperCAmelCase =processor(text=self.input_string , voice_preset=_snake_case )
_UpperCAmelCase =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_UpperCAmelCase =os.path.join(self.tmpdirname , "file.npz" )
np.savez(_snake_case , **_snake_case )
_UpperCAmelCase =processor(text=self.input_string , voice_preset=_snake_case )
_UpperCAmelCase =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_UpperCAmelCase =processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_tokenizer()
_UpperCAmelCase =BarkProcessor(tokenizer=_snake_case )
_UpperCAmelCase =processor(text=self.input_string )
_UpperCAmelCase =tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 592
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Any=9 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[str]=32 , UpperCamelCase__ : Optional[Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : Tuple=8 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[str]=0.002 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , ):
A__ : Union[str, Any] =parent
A__ : Optional[int] =batch_size
A__ : Dict =encoder_seq_length
A__ : str =decoder_seq_length
# For common tests
A__ : int =self.decoder_seq_length
A__ : Tuple =is_training
A__ : Optional[int] =use_attention_mask
A__ : int =use_labels
A__ : str =vocab_size
A__ : str =hidden_size
A__ : Dict =num_hidden_layers
A__ : str =num_attention_heads
A__ : Union[str, Any] =d_ff
A__ : List[Any] =relative_attention_num_buckets
A__ : Optional[int] =dropout_rate
A__ : List[Any] =initializer_factor
A__ : Optional[Any] =eos_token_id
A__ : List[Any] =pad_token_id
A__ : Optional[int] =decoder_start_token_id
A__ : Any =None
A__ : Tuple =decoder_layers
def _UpperCAmelCase ( self : Any ):
return TaConfig.from_pretrained("google/umt5-base" )
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[int]=None , ):
if attention_mask is None:
A__ : Any =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
A__ : Optional[int] =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
A__ : int =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_snake_case )
if decoder_head_mask is None:
A__ : Optional[Any] =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
if cross_attn_head_mask is None:
A__ : Optional[int] =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self : List[Any] ):
A__ : List[str] =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
A__ : List[Any] =input_ids.clamp(self.pad_token_id + 1 )
A__ : Dict =decoder_input_ids.clamp(self.pad_token_id + 1 )
A__ : Union[str, Any] =self.get_config()
A__ : List[Any] =config.num_attention_heads
A__ : Union[str, Any] =self.prepare_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, input_dict
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ : int =self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self : Dict ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self : str ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , ):
A__ : Dict =UMTaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ : List[Any] =model(
input_ids=_snake_case , decoder_input_ids=_snake_case , attention_mask=_snake_case , decoder_attention_mask=_snake_case , )
A__ : Optional[int] =model(input_ids=_snake_case , decoder_input_ids=_snake_case )
A__ : List[Any] =result.last_hidden_state
A__ : Any =result.past_key_values
A__ : Optional[Any] =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , ):
A__ : Union[str, Any] =UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval()
# first forward pass
A__ : Optional[Any] =model(_snake_case , use_cache=_snake_case )
A__ : Optional[int] =model(_snake_case )
A__ : Any =model(_snake_case , use_cache=_snake_case )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
A__ : Any =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Optional[Any] =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
A__ : Union[str, Any] =torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : int =model(_snake_case )['''last_hidden_state''']
A__ : Optional[Any] =model(_snake_case , past_key_values=_snake_case )['''last_hidden_state''']
# select random slice
A__ : Any =ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] =output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Tuple =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , ):
A__ : Union[str, Any] =UMTaModel(config=_snake_case ).to(_snake_case ).half().eval()
A__ : int =model(**_snake_case )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(_snake_case ).any().item() )
@require_torch
class __lowerCAmelCase ( a_ , a_ , a_ , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__magic_name__ : Dict = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__magic_name__ : int = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__magic_name__ : Union[str, Any] = True
__magic_name__ : List[str] = False
__magic_name__ : List[str] = False
__magic_name__ : str = True
__magic_name__ : Tuple = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__magic_name__ : Union[str, Any] = [0.8, 0.9]
def _UpperCAmelCase ( self : Tuple ):
A__ : Dict =UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def _UpperCAmelCase ( self : Tuple ):
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
A__ : Tuple =UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=_snake_case , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def _UpperCAmelCase ( self : Optional[int] ):
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_snake_case )
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ : List[str] =['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
A__ : Any =config_and_inputs[0]
A__ : List[Any] =UMTaForConditionalGeneration(_snake_case ).eval()
model.to(_snake_case )
A__ : Dict ={
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=_snake_case ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
}
for attn_name, (name, mask) in zip(_snake_case , head_masking.items() ):
A__ : Tuple ={name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
A__ : Union[str, Any] =torch.ones(
config.num_decoder_layers , config.num_heads , device=_snake_case )
A__ : str =model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=_snake_case , return_dict_in_generate=_snake_case , **_snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
A__ : Union[str, Any] =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def _UpperCAmelCase ( self : Optional[Any] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def _UpperCAmelCase ( self : Any ):
A__ : List[str] =UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=_snake_case ).to(_snake_case )
A__ : Optional[Any] =AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=_snake_case , legacy=_snake_case )
A__ : Any =[
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
A__ : Optional[int] =tokenizer(_snake_case , return_tensors="pt" , padding=_snake_case ).input_ids
# fmt: off
A__ : List[str] =torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_snake_case , _snake_case )
A__ : Tuple =model.generate(input_ids.to(_snake_case ) )
A__ : Optional[Any] =[
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
A__ : List[str] =tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case , _snake_case )
| 656
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
A : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
A : int = {"""target_lang""": """fi""", """source_lang""": """en"""}
A : Tuple = """>>zh<<"""
A : Optional[int] = """Helsinki-NLP/"""
if is_torch_available():
A : Dict = """pt"""
elif is_tf_available():
A : Optional[int] = """tf"""
else:
A : List[str] = """jax"""
@require_sentencepiece
class lowerCAmelCase_ ( a_ , unittest.TestCase ):
__UpperCAmelCase = MarianTokenizer
__UpperCAmelCase = False
__UpperCAmelCase = True
def __snake_case ( self : List[str] ):
'''simple docstring'''
super().setUp()
snake_case : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case : Optional[Any] =dict(zip(_snake_case, range(len(_snake_case ) ) ) )
snake_case : Dict =Path(self.tmpdirname )
save_json(_snake_case, save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(_snake_case, save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_snake_case, save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(_snake_case, save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
snake_case : Any =MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[str], **_snake_case : Tuple ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname, **_snake_case )
def __snake_case ( self : Any, _snake_case : Dict ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
snake_case : Optional[int] ='''</s>'''
snake_case : int =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ), _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ), _snake_case )
def __snake_case ( self : Any ):
'''simple docstring'''
snake_case : Tuple =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''</s>''' )
self.assertEqual(vocab_keys[1], '''<unk>''' )
self.assertEqual(vocab_keys[-1], '''<pad>''' )
self.assertEqual(len(_snake_case ), 9 )
def __snake_case ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 9 )
def __snake_case ( self : Tuple ):
'''simple docstring'''
snake_case : Tuple =MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''' )
snake_case : List[str] =en_de_tokenizer(['''I am a small frog'''], return_tensors=_snake_case )
self.assertIsInstance(_snake_case, _snake_case )
snake_case : Any =[38, 121, 14, 697, 38_848, 0]
self.assertListEqual(_snake_case, batch.input_ids[0] )
snake_case : List[Any] =tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_snake_case )
snake_case : List[Any] =[x.name for x in Path(_snake_case ).glob('''*''' )]
self.assertIn('''source.spm''', _snake_case )
MarianTokenizer.from_pretrained(_snake_case )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : Any =self.get_tokenizer()
snake_case : int =tok(
['''I am a small frog''' * 1_000, '''I am a small frog'''], padding=_snake_case, truncation=_snake_case, return_tensors=_snake_case )
self.assertIsInstance(_snake_case, _snake_case )
self.assertEqual(batch.input_ids.shape, (2, 512) )
def __snake_case ( self : int ):
'''simple docstring'''
snake_case : List[str] =self.get_tokenizer()
snake_case : int =tok(['''I am a tiny frog''', '''I am a small frog'''], padding=_snake_case, return_tensors=_snake_case )
self.assertIsInstance(_snake_case, _snake_case )
self.assertEqual(batch_smaller.input_ids.shape, (2, 10) )
@slow
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
snake_case : List[Any] ={'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case, model_name='''Helsinki-NLP/opus-mt-en-de''', revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''', decode_kwargs={'''use_source_tokenizer''': True}, )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
snake_case : Optional[Any] =MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
snake_case : List[str] ='''Tämä on testi'''
snake_case : Optional[int] ='''This is a test'''
snake_case : Optional[Any] =[76, 7, 2_047, 2]
snake_case : int =[69, 12, 11, 940, 2]
snake_case : Optional[int] =tokenizer(_snake_case ).input_ids
self.assertListEqual(_snake_case, _snake_case )
snake_case : Optional[Any] =tokenizer(text_target=_snake_case ).input_ids
self.assertListEqual(_snake_case, _snake_case )
snake_case : Optional[int] =tokenizer.decode(_snake_case, skip_special_tokens=_snake_case )
self.assertEqual(_snake_case, _snake_case )
| 349
| 0
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ = controlnet_params
lowerCamelCase__ = 'bird'
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCamelCase__ = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCamelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : str ):
lowerCamelCase__ , lowerCamelCase__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ = controlnet_params
lowerCamelCase__ = 'Chef in the kitchen'
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCamelCase__ = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCamelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 716
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 659
| 0
|
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
'''simple docstring'''
snake_case : Optional[int] = get_failure_array(lowerCamelCase__ )
# 2) Step through text searching for pattern
snake_case : Optional[Any] = 0, 0 # index into text, pattern
while i < len(lowerCamelCase__ ):
if pattern[j] == text[i]:
if j == (len(lowerCamelCase__ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
snake_case : str = failure[j - 1]
continue
i += 1
return False
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> list[int]:
'''simple docstring'''
snake_case : Optional[int] = [0]
snake_case : Optional[int] = 0
snake_case : int = 1
while j < len(lowerCamelCase__ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
snake_case : int = failure[i - 1]
continue
j += 1
failure.append(lowerCamelCase__ )
return failure
if __name__ == "__main__":
# Test 1)
lowercase__ = 'abc1abc12'
lowercase__ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
lowercase__ = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowercase__ = 'ABABX'
lowercase__ = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
lowercase__ = 'AAAB'
lowercase__ = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
lowercase__ = 'abcdabcy'
lowercase__ = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
lowercase__ = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 638
|
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
A__ : int = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
A__ : Tuple = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
A__ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : List[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : Dict = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
A__ : List[str] = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : Any = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
A__ : Optional[int] = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : Any = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
A__ : List[str] = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : List[Any] = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
A__ : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : Tuple = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
A__ : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
A__ : Tuple = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
A__ : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
A__ : Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
A__ : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
A__ : List[str] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
A__ : Union[str, Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : Dict = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
A__ : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
A__ : List[str] = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
A__ : Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
A__ : Optional[Any] = ''
A__ : Union[str, Any] = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
A__ : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A__ : Dict = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) -> Dict:
assert ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple ) -> str:
with pytest.raises(lowerCamelCase__ , match=re.escape(expected_error.format(path="root" ) ) ):
lowerCamelCase_ : Any =ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ) -> int:
with pytest.raises(lowerCamelCase__ , match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowerCamelCase__ : Tuple ) -> Dict:
ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ , suppress_parsing_errors=lowerCamelCase__ )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Optional[int] =Path(lowerCamelCase__ ) / "README.md"
with open(lowerCamelCase__ , "w+" ) as readme_file:
readme_file.write(lowerCamelCase__ )
lowerCamelCase_ : Optional[int] =ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _snake_case ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : int =Path(lowerCamelCase__ ) / "README.md"
with open(lowerCamelCase__ , "w+" ) as readme_file:
readme_file.write(lowerCamelCase__ )
lowerCamelCase_ : str =expected_error.format(path=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ , match=re.escape(lowerCamelCase__ ) ):
lowerCamelCase_ : Optional[Any] =ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Optional[Any] =Path(lowerCamelCase__ ) / "README.md"
with open(lowerCamelCase__ , "w+" ) as readme_file:
readme_file.write(lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] =expected_error.format(path=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ , match=re.escape(lowerCamelCase__ ) ):
ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowerCamelCase__ : str ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Tuple =Path(lowerCamelCase__ ) / "README.md"
with open(lowerCamelCase__ , "w+" ) as readme_file:
readme_file.write(lowerCamelCase__ )
ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ , suppress_parsing_errors=lowerCamelCase__ )
| 153
| 0
|
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase_ ( lowercase__):
lowerCamelCase__ = {}
lowerCamelCase__ = tokenizer(example["content"] , truncation=lowercase__)["input_ids"]
lowerCamelCase__ = len(example["content"]) / len(output["input_ids"])
return output
__A : Optional[Any] = HfArgumentParser(PretokenizationArguments)
__A : Any = parser.parse_args()
if args.num_workers is None:
__A : Optional[Any] = multiprocessing.cpu_count()
__A : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__A : Optional[Any] = time.time()
__A : Any = load_dataset(args.dataset_name, split="""train""")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
__A : List[Any] = time.time()
__A : Tuple = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
__A : Dict = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 187
|
'''simple docstring'''
from __future__ import annotations
class lowercase :
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : int ) -> None:
'''simple docstring'''
lowerCamelCase__ = order
# a_{0} ... a_{k}
lowerCamelCase__ = [1.0] + [0.0] * order
# b_{0} ... b_{k}
lowerCamelCase__ = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
lowerCamelCase__ = [0.0] * self.order
# y[n-1] ... y[n-k]
lowerCamelCase__ = [0.0] * self.order
def a__ ( self : Dict , __lowerCamelCase : list[float] , __lowerCamelCase : list[float] ) -> None:
'''simple docstring'''
if len(__lowerCamelCase ) < self.order:
lowerCamelCase__ = [1.0, *a_coeffs]
if len(__lowerCamelCase ) != self.order + 1:
lowerCamelCase__ = (
f'''Expected a_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(__lowerCamelCase )}'''
)
raise ValueError(__lowerCamelCase )
if len(__lowerCamelCase ) != self.order + 1:
lowerCamelCase__ = (
f'''Expected b_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(__lowerCamelCase )}'''
)
raise ValueError(__lowerCamelCase )
lowerCamelCase__ = a_coeffs
lowerCamelCase__ = b_coeffs
def a__ ( self : Dict , __lowerCamelCase : float ) -> float:
'''simple docstring'''
lowerCamelCase__ = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
lowerCamelCase__ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
lowerCamelCase__ = self.input_history[:-1]
lowerCamelCase__ = self.output_history[:-1]
lowerCamelCase__ = sample
lowerCamelCase__ = result
return result
| 187
| 1
|
# flake8: noqa
# Lint as: python3
lowercase__ : Optional[int] = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 312
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = ScoreSdeVeScheduler()
lowerCAmelCase = ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__SCREAMING_SNAKE_CASE ).images
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[
0
]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = '''google/ncsnpp-church-256'''
lowerCAmelCase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=__SCREAMING_SNAKE_CASE ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 312
| 1
|
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] ) -> bool:
'''simple docstring'''
_snake_case = [int(a__ ) for i in ip_va_address.split('.' ) if i.isdigit()]
return len(a__ ) == 4 and all(0 <= int(a__ ) <= 254 for octet in octets )
if __name__ == "__main__":
UpperCAmelCase_ = input().strip()
UpperCAmelCase_ = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
print(F"{ip} is a {valid_or_invalid} IP v4 address.")
| 716
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=10 , lowerCAmelCase_=3 , lowerCAmelCase_=2 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_="divided_space_time" , lowerCAmelCase_=None , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = patch_size
_snake_case = num_frames
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = attention_type
_snake_case = initializer_range
_snake_case = scope
_snake_case = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
_snake_case = (image_size // patch_size) ** 2
_snake_case = (num_frames) * self.num_patches_per_frame + 1
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> str:
_snake_case = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
_snake_case = self.num_labels
return config
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = TimesformerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = TimesformerForVideoClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
# verify the logits shape
_snake_case = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCAmelCase_ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = TimesformerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Any:
_snake_case = copy.deepcopy(lowerCAmelCase_ )
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowerCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TimesformerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
if not self.has_attentions:
pass
else:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
for model_class in self.all_model_classes:
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.num_frames
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_snake_case = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCAmelCase ( self ) -> Dict:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.hidden_states
_snake_case = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
lowerCAmelCase_ )
_snake_case = self.default_image_processor
_snake_case = prepare_video()
_snake_case = image_processor(video[:8] , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_snake_case = model(**lowerCAmelCase_ )
# verify the logits
_snake_case = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 541
| 0
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase__ :
@staticmethod
def lowerCamelCase_ ( *__A : Optional[Any],**__A : int ):
pass
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.array(_lowerCAmelCase )
_lowerCamelCase : str = npimg.shape
return {"hash": hashimage(_lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase_ ( self : int,__A : Any,__A : Optional[int],__A : Optional[int] ):
_lowerCamelCase : Tuple = MaskGenerationPipeline(model=__A,image_processor=__A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Tuple,__A : List[Any],__A : Union[str, Any] ):
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCamelCase_ ( self : str ):
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = pipeline("mask-generation",model="facebook/sam-vit-huge" )
_lowerCamelCase : Tuple = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg",points_per_batch=2_5_6 )
# Shortening by hashing
_lowerCamelCase : Optional[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.8871}
],)
# fmt: on
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = "facebook/sam-vit-huge"
_lowerCamelCase : Tuple = pipeline("mask-generation",model=__A )
_lowerCamelCase : Union[str, Any] = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg",pred_iou_thresh=1,points_per_batch=2_5_6 )
# Shortening by hashing
_lowerCamelCase : List[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
],)
| 44
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 44
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCamelCase__ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def __A(lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
inspect_dataset(lowerCAmelCase , lowerCAmelCase )
_UpperCamelCase = path + """.py"""
assert script_name in os.listdir(lowerCAmelCase )
assert "__pycache__" not in os.listdir(lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def __A(lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
inspect_metric(lowerCAmelCase , lowerCAmelCase )
_UpperCamelCase = path + """.py"""
assert script_name in os.listdir(lowerCAmelCase )
assert "__pycache__" not in os.listdir(lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
"""simple docstring"""
_UpperCamelCase = get_dataset_config_info(lowerCAmelCase , config_name=lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
"""simple docstring"""
with pytest.raises(lowerCAmelCase ):
get_dataset_config_info(lowerCAmelCase , config_name=lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def __A(lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = get_dataset_config_names(lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
"""simple docstring"""
_UpperCamelCase = get_dataset_infos(lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase = expected_configs[0]
assert expected_config in infos
_UpperCamelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
"""simple docstring"""
_UpperCamelCase = get_dataset_infos(lowerCAmelCase )
assert expected_config in infos
_UpperCamelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
with pytest.raises(lowerCAmelCase ):
get_dataset_split_names(lowerCAmelCase , config_name=lowerCAmelCase )
| 704
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
def __init__( self , *a , **a ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , a , )
super().__init__(*a , **a )
| 202
| 0
|
def UpperCamelCase_( _A :int )-> bool:
if not isinstance(_A , _A ):
UpperCamelCase__ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_A )
if number < 0:
return False
UpperCamelCase__ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
_UpperCamelCase : int = (DDIMParallelScheduler,)
_UpperCamelCase : List[Any] = (('eta', 0.0), ('num_inference_steps', 50))
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
UpperCamelCase__ = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**snake_case )
return config
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(**snake_case )
UpperCamelCase__ = scheduler_class(**snake_case )
UpperCamelCase__, UpperCamelCase__ = 10, 0.0
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(snake_case )
for t in scheduler.timesteps:
UpperCamelCase__ = model(snake_case , snake_case )
UpperCamelCase__ = scheduler.step(snake_case , snake_case , snake_case , snake_case ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case )
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__ = scheduler_class(**snake_case )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def snake_case__ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def snake_case__ ( self ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=snake_case , num_inference_steps=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=snake_case , eta=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case )
UpperCamelCase__, UpperCamelCase__ = 10, 0.0
scheduler.set_timesteps(snake_case )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = self.dummy_sample_deter + 0.1
UpperCamelCase__ = self.dummy_sample_deter - 0.1
UpperCamelCase__ = samplea.shape[0]
UpperCamelCase__ = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase__ = torch.arange(snake_case )[0:3, None].repeat(1 , snake_case )
UpperCamelCase__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase__ = scheduler.batch_step_no_noise(snake_case , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop()
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop(set_alpha_to_one=snake_case , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop(set_alpha_to_one=snake_case , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 551
| 1
|
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
return [ord(_lowerCAmelCase ) - 96 for elem in plain]
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : str = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , _lowerCAmelCase )
print('''Decoded:''' , decode(_lowerCAmelCase ) )
if __name__ == "__main__":
main()
| 711
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[Any] = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : int = "falcon"
__lowerCamelCase : int = ["past_key_values"]
def __init__( self, lowerCamelCase__=6_5024, lowerCamelCase__=4544, lowerCamelCase__=32, lowerCamelCase__=71, lowerCamelCase__=1e-5, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=None, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=11, lowerCamelCase__=11, **lowerCamelCase__, ):
A : List[str] = vocab_size
# Backward compatibility with n_embed kwarg
A : Optional[Any] = kwargs.pop("""n_embed""", lowerCamelCase_ )
A : Optional[Any] = hidden_size if n_embed is None else n_embed
A : Optional[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : int = layer_norm_epsilon
A : Dict = initializer_range
A : Union[str, Any] = use_cache
A : Any = hidden_dropout
A : int = attention_dropout
A : Optional[int] = bos_token_id
A : Union[str, Any] = eos_token_id
A : Optional[int] = num_attention_heads if num_kv_heads is None else num_kv_heads
A : Any = alibi
A : Dict = new_decoder_architecture
A : Any = multi_query # Ignored when new_decoder_architecture is True
A : Optional[int] = parallel_attn
A : str = bias
super().__init__(bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
@property
def _lowerCAmelCase ( self ):
return self.hidden_size // self.num_attention_heads
@property
def _lowerCAmelCase ( self ):
return not self.alibi
| 662
|
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 34
| 0
|
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = generate_pascal_triangle(SCREAMING_SNAKE_CASE )
for row_idx in range(SCREAMING_SNAKE_CASE ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=''' ''' )
else:
print(triangle[row_idx][col_idx] , end='''''' )
print()
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
__UpperCamelCase :list[list[int]] = []
for current_row_idx in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :List[Any] = populate_current_row(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
triangle.append(SCREAMING_SNAKE_CASE )
return triangle
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :int = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__UpperCamelCase , __UpperCamelCase :int = 1, 1
for current_col_idx in range(1 , SCREAMING_SNAKE_CASE ):
calculate_current_element(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return current_row
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
__UpperCamelCase :Dict = triangle[current_row_idx - 1][current_col_idx - 1]
__UpperCamelCase :List[Any] = triangle[current_row_idx - 1][current_col_idx]
__UpperCamelCase :List[Any] = above_to_left_elt + above_to_right_elt
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
__UpperCamelCase :list[list[int]] = [[1]]
for row_index in range(1 , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = [0] + result[-1] + [0]
__UpperCamelCase :Any = row_index + 1
# Calculate the number of distinct elements in a row
__UpperCamelCase :Optional[Any] = sum(divmod(SCREAMING_SNAKE_CASE , 2 ) )
__UpperCamelCase :Union[str, Any] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__UpperCamelCase :List[Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__UpperCamelCase :List[str] = row_first_half + row_second_half
result.append(SCREAMING_SNAKE_CASE )
return result
def lowerCamelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
__UpperCamelCase :List[str] = f"""{func.__name__}({value})"""
__UpperCamelCase :Optional[int] = timeit(f"""__main__.{call}""" , setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 452
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained('''google/mt5-small''')
__UpperCamelCase :Optional[Any] = tokenizer('''Hello there''' , return_tensors='''np''').input_ids
__UpperCamelCase :List[str] = tokenizer('''Hi I am''' , return_tensors='''np''').input_ids
__UpperCamelCase :Optional[int] = shift_tokens_right(__lowercase , model.config.pad_token_id , model.config.decoder_start_token_id)
__UpperCamelCase :Tuple = model(__lowercase , decoder_input_ids=__lowercase).logits
__UpperCamelCase :Any = optax.softmax_cross_entropy(__lowercase , onehot(__lowercase , logits.shape[-1])).mean()
__UpperCamelCase :str = -(labels.shape[-1] * loss.item())
__UpperCamelCase :Optional[Any] = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 452
| 1
|
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase__ = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def snake_case_ ( A_ : float, A_ : str, A_ : str ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(A_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(A_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case=True, snake_case="pt"):
__snake_case = {'''add_prefix_space''': True} if isinstance(snake_case, snake_case) and not line.startswith(''' ''') else {}
__snake_case = padding_side
return tokenizer(
[line], max_length=snake_case, padding='''max_length''' if pad_to_max_length else None, truncation=snake_case, return_tensors=snake_case, add_special_tokens=snake_case, **snake_case, )
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case=None, ):
__snake_case = input_ids.ne(snake_case).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _A ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str , A_ : Any , A_ : int , A_ : Dict , A_ : Union[str, Any] , A_ : List[Any]="train" , A_ : Dict=None , A_ : str=None , A_ : List[Any]=None , A_ : int="" , ) -> str:
super().__init__()
__snake_case = Path(A_ ).joinpath(type_path + '''.source''' )
__snake_case = Path(A_ ).joinpath(type_path + '''.target''' )
__snake_case = self.get_char_lens(self.src_file )
__snake_case = max_source_length
__snake_case = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
__snake_case = tokenizer
__snake_case = prefix
if n_obs is not None:
__snake_case = self.src_lens[:n_obs]
__snake_case = src_lang
__snake_case = tgt_lang
def __len__( self : Optional[int] ) -> Union[str, Any]:
return len(self.src_lens )
def __getitem__( self : Any , A_ : Any ) -> Dict[str, torch.Tensor]:
__snake_case = index + 1 # linecache starts at 1
__snake_case = self.prefix + linecache.getline(str(self.src_file ) , A_ ).rstrip('''\n''' )
__snake_case = linecache.getline(str(self.tgt_file ) , A_ ).rstrip('''\n''' )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__snake_case = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A_ ) else self.tokenizer
)
__snake_case = self.tokenizer.generator if isinstance(self.tokenizer , A_ ) else self.tokenizer
__snake_case = encode_line(A_ , A_ , self.max_source_length , '''right''' )
__snake_case = encode_line(A_ , A_ , self.max_target_length , '''right''' )
__snake_case = source_inputs['''input_ids'''].squeeze()
__snake_case = target_inputs['''input_ids'''].squeeze()
__snake_case = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowercase ( A_ : Optional[Any] ) -> Any:
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def lowercase ( self : List[str] , A_ : Any ) -> Dict[str, torch.Tensor]:
__snake_case = torch.stack([x['''input_ids'''] for x in batch] )
__snake_case = torch.stack([x['''attention_mask'''] for x in batch] )
__snake_case = torch.stack([x['''decoder_input_ids'''] for x in batch] )
__snake_case = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A_ )
else self.tokenizer.pad_token_id
)
__snake_case = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A_ )
else self.tokenizer.pad_token_id
)
__snake_case = trim_batch(A_ , A_ )
__snake_case , __snake_case = trim_batch(A_ , A_ , attention_mask=A_ )
__snake_case = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
__lowercase : Optional[int] = getLogger(__name__)
def SCREAMING_SNAKE_CASE ( snake_case):
return list(itertools.chain.from_iterable(snake_case))
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = get_git_info()
save_json(snake_case, os.path.join(snake_case, '''git_log.json'''))
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case=4, **snake_case):
with open(snake_case, '''w''') as f:
json.dump(snake_case, snake_case, indent=snake_case, **snake_case)
def SCREAMING_SNAKE_CASE ( snake_case):
with open(snake_case) as f:
return json.load(snake_case)
def SCREAMING_SNAKE_CASE ( ):
__snake_case = git.Repo(search_parent_directories=snake_case)
__snake_case = {
'''repo_id''': str(snake_case),
'''repo_sha''': str(repo.head.object.hexsha),
'''repo_branch''': str(repo.active_branch),
'''hostname''': str(socket.gethostname()),
}
return repo_infos
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
return list(map(snake_case, snake_case))
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
with open(snake_case, '''wb''') as f:
return pickle.dump(snake_case, snake_case)
def SCREAMING_SNAKE_CASE ( snake_case):
def remove_articles(snake_case):
return re.sub(R'''\b(a|an|the)\b''', ''' ''', snake_case)
def white_space_fix(snake_case):
return " ".join(text.split())
def remove_punc(snake_case):
__snake_case = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(snake_case):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case))))
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
__snake_case = normalize_answer(snake_case).split()
__snake_case = normalize_answer(snake_case).split()
__snake_case = Counter(snake_case) & Counter(snake_case)
__snake_case = sum(common.values())
if num_same == 0:
return 0
__snake_case = 1.0 * num_same / len(snake_case)
__snake_case = 1.0 * num_same / len(snake_case)
__snake_case = (2 * precision * recall) / (precision + recall)
return fa
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
return normalize_answer(snake_case) == normalize_answer(snake_case)
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
assert len(snake_case) == len(snake_case)
__snake_case = 0
for hypo, pred in zip(snake_case, snake_case):
em += exact_match_score(snake_case, snake_case)
if len(snake_case) > 0:
em /= len(snake_case)
return {"em": em}
def SCREAMING_SNAKE_CASE ( snake_case):
return model_prefix.startswith('''rag''')
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case):
__snake_case = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__snake_case = '''dropout_rate'''
for p in extra_params:
if getattr(snake_case, snake_case, snake_case):
if not hasattr(snake_case, snake_case) and not hasattr(snake_case, equivalent_param[p]):
logger.info('''config doesn\'t have a `{}` attribute'''.format(snake_case))
delattr(snake_case, snake_case)
continue
__snake_case = p if hasattr(snake_case, snake_case) else equivalent_param[p]
setattr(snake_case, snake_case, getattr(snake_case, snake_case))
delattr(snake_case, snake_case)
return hparams, config
| 564
| 0
|
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase_ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase_ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase_ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase_ = requests.get(image_url).content
lowerCamelCase_ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 709
|
import math
def UpperCAmelCase_ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =[]
SCREAMING_SNAKE_CASE__ =2
SCREAMING_SNAKE_CASE__ =int(math.sqrt(__UpperCamelCase ) ) # Size of every segment
SCREAMING_SNAKE_CASE__ =[True] * (end + 1)
SCREAMING_SNAKE_CASE__ =[]
while start <= end:
if temp[start] is True:
in_prime.append(__UpperCamelCase )
for i in range(start * start, end + 1, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =False
start += 1
prime += in_prime
SCREAMING_SNAKE_CASE__ =end + 1
SCREAMING_SNAKE_CASE__ =min(2 * end, __UpperCamelCase )
while low <= n:
SCREAMING_SNAKE_CASE__ =[True] * (high - low + 1)
for each in in_prime:
SCREAMING_SNAKE_CASE__ =math.floor(low / each ) * each
if t < low:
t += each
for j in range(__UpperCamelCase, high + 1, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =False
for j in range(len(__UpperCamelCase ) ):
if temp[j] is True:
prime.append(j + low )
SCREAMING_SNAKE_CASE__ =high + 1
SCREAMING_SNAKE_CASE__ =min(high + end, __UpperCamelCase )
return prime
print(sieve(10**6))
| 588
| 0
|
def UpperCamelCase_( snake_case__: int = 10_00 ) -> Dict:
UpperCAmelCase__ = 3
UpperCAmelCase__ = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 146
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCamelCase ( __lowerCamelCase : int = 8 ):
snake_case : int = ascii_letters + digits + punctuation
return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int ):
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(__lowerCamelCase )
snake_case : Any = i // 3
snake_case : Optional[int] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case : Tuple = (
chars_incl
+ random(__lowerCamelCase , quotient + remainder )
+ random(__lowerCamelCase , __lowerCamelCase )
+ random(__lowerCamelCase , __lowerCamelCase )
)
snake_case : Optional[Any] = list(__lowerCamelCase )
shuffle(__lowerCamelCase )
return "".join(__lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int ):
return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : str ):
pass # Put your code here...
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
pass # Put your code here...
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
pass # Put your code here...
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int = 8 ):
if len(__lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case : Dict = any(char in ascii_uppercase for char in password )
snake_case : Optional[int] = any(char in ascii_lowercase for char in password )
snake_case : str = any(char in digits for char in password )
snake_case : str = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def UpperCamelCase ( ):
snake_case : int = int(input("Please indicate the max length of your password: " ).strip() )
snake_case : Union[str, Any] = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(__lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(__lowerCamelCase , __lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 204
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowerCAmelCase_ = logging.getLogger(__name__)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> int:
'''simple docstring'''
A = False
def A( self : str ,_SCREAMING_SNAKE_CASE : Optional[Any] ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : Dict ) -> Dict:
'''simple docstring'''
if not self.initialized:
A = RagRetriever(
_SCREAMING_SNAKE_CASE ,question_encoder_tokenizer=_SCREAMING_SNAKE_CASE ,generator_tokenizer=_SCREAMING_SNAKE_CASE ,index=_SCREAMING_SNAKE_CASE ,init_retrieval=_SCREAMING_SNAKE_CASE ,)
A = True
def A( self : Any ) -> Optional[int]:
'''simple docstring'''
self.retriever.index.init_index()
def A( self : List[Any] ,_SCREAMING_SNAKE_CASE : Optional[Any] ,_SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
'''simple docstring'''
A , A = self.retriever._main_retrieve(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return doc_ids, retrieved_doc_embeds
class UpperCamelCase ( snake_case__ ):
"""simple docstring"""
def __init__( self : int ,_SCREAMING_SNAKE_CASE : Optional[Any] ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : Dict ,_SCREAMING_SNAKE_CASE : List[str]=None ) -> Tuple:
'''simple docstring'''
if index is not None and index.is_initialized() and len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
_SCREAMING_SNAKE_CASE ,question_encoder_tokenizer=_SCREAMING_SNAKE_CASE ,generator_tokenizer=_SCREAMING_SNAKE_CASE ,index=_SCREAMING_SNAKE_CASE ,init_retrieval=_SCREAMING_SNAKE_CASE ,)
A = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for worker in self.retrieval_workers
] )
def A( self : Tuple ) -> int:
'''simple docstring'''
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def A( self : Tuple ,_SCREAMING_SNAKE_CASE : Union[str, Any] ,_SCREAMING_SNAKE_CASE : int ) -> Dict:
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )]
A , A = ray.get(random_worker.retrieve.remote(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
else:
A , A = self._main_retrieve(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_SCREAMING_SNAKE_CASE )
@classmethod
def A( cls : Union[str, Any] ,_SCREAMING_SNAKE_CASE : Any ,_SCREAMING_SNAKE_CASE : Union[str, Any]=None ,**_SCREAMING_SNAKE_CASE : Any ) -> List[str]:
'''simple docstring'''
return super(_SCREAMING_SNAKE_CASE ,cls ).get_tokenizers(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
@classmethod
def A( cls : Union[str, Any] ,_SCREAMING_SNAKE_CASE : Any ,_SCREAMING_SNAKE_CASE : Dict ,_SCREAMING_SNAKE_CASE : Any=None ,**_SCREAMING_SNAKE_CASE : int ) -> str:
'''simple docstring'''
A = kwargs.pop('config' ,_SCREAMING_SNAKE_CASE ) or RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
A = RagTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
A = rag_tokenizer.question_encoder
A = rag_tokenizer.generator
if indexed_dataset is not None:
A = 'custom'
A = CustomHFIndex(config.retrieval_vector_size ,_SCREAMING_SNAKE_CASE )
else:
A = cls._build_index(_SCREAMING_SNAKE_CASE )
return cls(
_SCREAMING_SNAKE_CASE ,question_encoder_tokenizer=_SCREAMING_SNAKE_CASE ,generator_tokenizer=_SCREAMING_SNAKE_CASE ,retrieval_workers=_SCREAMING_SNAKE_CASE ,index=_SCREAMING_SNAKE_CASE ,)
| 110
|
from collections.abc import Callable
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Tuple ,_SCREAMING_SNAKE_CASE : Callable | None = None ) -> None:
'''simple docstring'''
# Stores actual heap items.
A = []
# Stores indexes of each item for supporting updates and deletion.
A = {}
# Stores current size of heap.
A = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
A = key or (lambda _SCREAMING_SNAKE_CASE : x)
def A( self : Tuple ,_SCREAMING_SNAKE_CASE : int ) -> int | None:
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : int ) -> int | None:
'''simple docstring'''
A = int(2 * i + 1 )
return left if 0 < left < self.size else None
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : int ) -> int | None:
'''simple docstring'''
A = int(2 * i + 2 )
return right if 0 < right < self.size else None
def A( self : List[str] ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : int ) -> None:
'''simple docstring'''
A , A = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
A , A = self.arr[j], self.arr[i]
def A( self : List[str] ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : int ) -> bool:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def A( self : int ,_SCREAMING_SNAKE_CASE : int ) -> int:
'''simple docstring'''
A = self._left(_SCREAMING_SNAKE_CASE )
A = self._right(_SCREAMING_SNAKE_CASE )
A = i
if left is not None and not self._cmp(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
A = left
if right is not None and not self._cmp(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
A = right
return valid_parent
def A( self : Any ,_SCREAMING_SNAKE_CASE : int ) -> None:
'''simple docstring'''
A = self._parent(_SCREAMING_SNAKE_CASE )
while parent is not None and not self._cmp(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self._swap(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
A , A = parent, self._parent(_SCREAMING_SNAKE_CASE )
def A( self : List[Any] ,_SCREAMING_SNAKE_CASE : int ) -> None:
'''simple docstring'''
A = self._get_valid_parent(_SCREAMING_SNAKE_CASE )
while valid_parent != index:
self._swap(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
A , A = valid_parent, self._get_valid_parent(_SCREAMING_SNAKE_CASE )
def A( self : Optional[Any] ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : int ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
A = self.pos_map[item]
A = [item, self.key(_SCREAMING_SNAKE_CASE )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_SCREAMING_SNAKE_CASE )
self._heapify_down(_SCREAMING_SNAKE_CASE )
def A( self : int ,_SCREAMING_SNAKE_CASE : int ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
A = self.pos_map[item]
del self.pos_map[item]
A = self.arr[self.size - 1]
A = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_SCREAMING_SNAKE_CASE )
self._heapify_down(_SCREAMING_SNAKE_CASE )
def A( self : Optional[Any] ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : int ) -> None:
'''simple docstring'''
A = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_SCREAMING_SNAKE_CASE )] )
else:
A = [item, self.key(_SCREAMING_SNAKE_CASE )]
A = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def A( self : str ) -> tuple | None:
'''simple docstring'''
return self.arr[0] if self.size else None
def A( self : Any ) -> tuple | None:
'''simple docstring'''
A = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 110
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def _A ( _a : Callable[[int | float], int | float] , _a : int | float , _a : int | float , _a : int = 1_0_0 , ):
"""simple docstring"""
A = x_start
A = fnc(_a )
A = 0.0
for _ in range(_a ):
# Approximates curve as a sequence of linear lines and sums their length
A = (x_end - x_start) / steps + xa
A = fnc(_a )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
A = xa
A = fxa
return length
if __name__ == "__main__":
def _A ( _a : Union[str, Any] ):
"""simple docstring"""
return math.sin(1_0 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
UpperCAmelCase =10
while i <= 100_000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 617
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def _A ( *_a : int ):
"""simple docstring"""
if not isinstance(_a , _a ):
A = list(_a )
for i in range(len(_a ) ):
A = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def _A ( _a : Exception ):
"""simple docstring"""
A = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(_a , _a ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def _A ( _a : callable = None , _a : int = 1_2_8 ):
"""simple docstring"""
if function is None:
return functools.partial(_a , starting_batch_size=_a )
A = starting_batch_size
def decorator(*_a : Union[str, Any] , **_a : List[Any] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
A = list(inspect.signature(_a ).parameters.keys() )
# Guard against user error
if len(_a ) < (len(_a ) + 1):
A = """, """.join([f'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'Batch size was passed into `{function.__name__}` as the first argument when called.'
f'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(_a , *_a , **_a )
except Exception as e:
if should_reduce_batch_size(_a ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
| 617
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = Dict[str, Any]
UpperCAmelCase = List[Prediction]
@add_end_docstrings(__UpperCAmelCase )
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : List[Any] , *snake_case__ : str , **snake_case__ : str ):
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __snake_case ( self : Optional[int] , **snake_case__ : Tuple ):
'''simple docstring'''
lowercase :str = {}
if "threshold" in kwargs:
lowercase :Optional[Any] = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self : Optional[Any] , *snake_case__ : int , **snake_case__ : Optional[Any] ):
'''simple docstring'''
return super().__call__(*snake_case__ , **snake_case__ )
def __snake_case ( self : List[str] , snake_case__ : str ):
'''simple docstring'''
lowercase :Union[str, Any] = load_image(snake_case__ )
lowercase :Tuple = torch.IntTensor([[image.height, image.width]] )
lowercase :Any = self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
lowercase :List[str] = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
lowercase :List[Any] = target_size
return inputs
def __snake_case ( self : Dict , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :List[str] = model_inputs.pop('''target_size''' )
lowercase :Optional[Any] = self.model(**snake_case__ )
lowercase :Union[str, Any] = outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
lowercase :List[str] = model_inputs['''bbox''']
return model_outputs
def __snake_case ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any]=0.9 ):
'''simple docstring'''
lowercase :Union[str, Any] = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
lowercase , lowercase :List[str] = target_size[0].tolist()
def unnormalize(snake_case__ : Union[str, Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
] ) )
lowercase , lowercase :int = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
lowercase :Optional[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
lowercase :List[str] = [unnormalize(snake_case__ ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
lowercase :Optional[int] = ['''score''', '''label''', '''box''']
lowercase :Optional[Any] = [dict(zip(snake_case__ , snake_case__ ) ) for vals in zip(scores.tolist() , snake_case__ , snake_case__ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
lowercase :Dict = self.image_processor.post_process_object_detection(snake_case__ , snake_case__ , snake_case__ )
lowercase :List[Any] = raw_annotations[0]
lowercase :Dict = raw_annotation['''scores''']
lowercase :Optional[int] = raw_annotation['''labels''']
lowercase :int = raw_annotation['''boxes''']
lowercase :Dict = scores.tolist()
lowercase :Any = [self.model.config.idalabel[label.item()] for label in labels]
lowercase :Any = [self._get_bounding_box(snake_case__ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
lowercase :Optional[Any] = ['''score''', '''label''', '''box''']
lowercase :str = [
dict(zip(snake_case__ , snake_case__ ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def __snake_case ( self : str , snake_case__ : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
lowercase , lowercase , lowercase , lowercase :Dict = box.int().tolist()
lowercase :Any = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 475
|
"""simple docstring"""
import math
def lowerCamelCase (a_ :int) -> bool:
assert isinstance(a_ , a_) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
lowercase :Any = range(3 , int(math.sqrt(a_) + 1) , 2)
return not any(not number % i for i in odd_numbers)
def lowerCamelCase (a_ :Optional[int] , a_ :Optional[int]=1 , **a_ :List[str]) -> Any:
lowercase :str = factor * value
lowercase :int = value
while not is_prime(a_):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **a_)
return value
| 475
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
_UpperCAmelCase : int = StableDiffusionSAGPipeline
_UpperCAmelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase : str = False
def __lowerCamelCase ( self : str ) ->Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
lowerCamelCase__ : List[str] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase__ : Optional[int] = CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase__ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCamelCase ( self : str , A : Any , A : Optional[int]=0 ) ->Any:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : str = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self : int ) ->List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : List[Any] ) ->List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : Any ) ->Optional[int]:
lowerCamelCase__ : List[str] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowerCamelCase__ : int = sag_pipe.to(lowerCamelCase_ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = '''.'''
lowerCamelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = sag_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCamelCase ( self : Any ) ->Any:
lowerCamelCase__ : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase__ : Tuple = sag_pipe.to(lowerCamelCase_ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] = '''.'''
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = sag_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
lowerCamelCase__ : Optional[int] = output.images
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCamelCase ( self : Any ) ->Optional[Any]:
lowerCamelCase__ : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase__ : Any = sag_pipe.to(lowerCamelCase_ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = '''.'''
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : Any = sag_pipe(
[prompt] , width=7_6_8 , height=5_1_2 , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' , )
lowerCamelCase__ : str = output.images
assert image.shape == (1, 5_1_2, 7_6_8, 3)
| 315
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase__ : List[str] = 250004
lowerCamelCase__ : str = 250020
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MBartaaTokenizer
UpperCamelCase = MBartaaTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Optional[int] = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''<s>'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
SCREAMING_SNAKE_CASE : Dict = 1
return cls
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : int = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
| 698
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["PerceiverFeatureExtractor"]
lowerCamelCase_ = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
from __future__ import annotations
def UpperCAmelCase_ ( __UpperCamelCase ):
if not nums:
return 0
SCREAMING_SNAKE_CASE__ =nums[0]
SCREAMING_SNAKE_CASE__ =0
for num in nums[1:]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =(
max_excluding + num,
max(__UpperCamelCase, __UpperCamelCase ),
)
return max(__UpperCamelCase, __UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588
| 0
|
'''simple docstring'''
def snake_case ( a_ : Union[str, Any] , a_ : Any ) -> Any:
"""simple docstring"""
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(a_ ) * abs(a_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 208
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ : Tuple = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Tuple = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__magic_name__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 102
| 0
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
__UpperCAmelCase = {"""target_lang""": """fi""", """source_lang""": """en"""}
__UpperCAmelCase = """>>zh<<"""
__UpperCAmelCase = """Helsinki-NLP/"""
if is_torch_available():
__UpperCAmelCase = """pt"""
elif is_tf_available():
__UpperCAmelCase = """tf"""
else:
__UpperCAmelCase = """jax"""
@require_sentencepiece
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] =MarianTokenizer
lowerCamelCase : str =False
lowerCamelCase : Any =True
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : List[str] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCAmelCase : Tuple = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
__lowerCAmelCase : List[str] = Path(self.tmpdirname )
save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCAmelCase : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict , **lowerCAmelCase : Any ) -> MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
"""simple docstring"""
__lowerCAmelCase : int = """</s>"""
__lowerCAmelCase : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowerCAmelCase ) , 9 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''' )
__lowerCAmelCase : List[str] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowerCAmelCase , batch.input_ids[0] )
__lowerCAmelCase : Any = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Tuple = [x.name for x in Path(lowerCAmelCase ).glob("""*""" )]
self.assertIn("""source.spm""" , lowerCAmelCase )
MarianTokenizer.from_pretrained(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : Union[str, Any] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : List[Any] = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCAmelCase : Dict = """Tämä on testi"""
__lowerCAmelCase : Optional[int] = """This is a test"""
__lowerCAmelCase : Tuple = [76, 7, 20_47, 2]
__lowerCAmelCase : Tuple = [69, 12, 11, 9_40, 2]
__lowerCAmelCase : Optional[int] = tokenizer(lowerCAmelCase ).input_ids
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer(text_target=lowerCAmelCase ).input_ids
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 710
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def snake_case_ (__A : int ) -> str:
__lowerCAmelCase : str = int(__A )
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def snake_case_ (__A : Dict , __A : Any , __A : List[str] , __A : Optional[int] , __A : Dict=3_0_0 ) -> int:
# docstyle-ignore
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def snake_case_ (__A : Optional[Any] ) -> Tuple:
__lowerCAmelCase : List[Any] = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__lowerCAmelCase : Any = f'''{elt:.6f}''' if isinstance(__A , __A ) else str(__A )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : Optional[int] =5
lowerCamelCase : Tuple =0.2
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Optional["NotebookTrainingTracker"] = None , lowerCAmelCase : int = 3_00 , ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = total
__lowerCAmelCase : Dict = """""" if prefix is None else prefix
__lowerCAmelCase : str = leave
__lowerCAmelCase : Optional[Any] = parent
__lowerCAmelCase : Optional[Any] = width
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : List[str] = None
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : int , lowerCAmelCase : bool = False , lowerCAmelCase : str = None ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : str = value
if comment is not None:
__lowerCAmelCase : Optional[Any] = comment
if self.last_value is None:
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : Optional[int] = value
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Any = self.warmup
__lowerCAmelCase : List[str] = 1
self.update_bar(lowerCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowerCAmelCase : Optional[Any] = time.time()
__lowerCAmelCase : Optional[int] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
__lowerCAmelCase : Optional[Any] = self.elapsed_time / (value - self.start_value)
else:
__lowerCAmelCase : str = None
if value >= self.total:
__lowerCAmelCase : Any = self.total
__lowerCAmelCase : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowerCAmelCase : List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase )
__lowerCAmelCase : str = value
__lowerCAmelCase : Union[str, Any] = current_time
if self.average_time_per_item is None:
__lowerCAmelCase : Optional[Any] = 1
else:
__lowerCAmelCase : List[str] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = """ """ * (len(str(self.total ) ) - len(str(lowerCAmelCase ) )) + str(lowerCAmelCase )
if self.elapsed_time is None:
__lowerCAmelCase : List[str] = f'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__lowerCAmelCase : Dict = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__lowerCAmelCase : Dict = (
f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
f''' {format_time(self.predicted_remaining )}'''
)
self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
self.display()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowerCAmelCase : List[str] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("""""" ) )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any]=None ) -> Any:
"""simple docstring"""
super().__init__(lowerCAmelCase )
__lowerCAmelCase : str = None if column_names is None else [column_names]
__lowerCAmelCase : List[str] = None
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowerCAmelCase : Optional[int] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
if self.inner_table is None:
__lowerCAmelCase : Tuple = [list(values.keys() ), list(values.values() )]
else:
__lowerCAmelCase : Dict = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCAmelCase )
__lowerCAmelCase : List[str] = columns
self.inner_table.append([values[c] for c in columns] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=3_00 ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = NotebookProgressBar(lowerCAmelCase , prefix=lowerCAmelCase , parent=self , width=lowerCAmelCase )
return self.child_bar
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = None
self.display()
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = None
__lowerCAmelCase : Any = None
__lowerCAmelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , **lowerCAmelCase : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase : int = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : str = 0
__lowerCAmelCase : List[Any] = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""" )
__lowerCAmelCase : int = NotebookTrainingTracker(state.max_steps , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , **lowerCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__lowerCAmelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
if not has_length(lowerCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowerCAmelCase : List[str] = self.training_tracker.add_child(len(lowerCAmelCase ) )
else:
__lowerCAmelCase : List[Any] = NotebookProgressBar(len(lowerCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowerCAmelCase : List[str] = None
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any=None , **lowerCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowerCAmelCase : List[str] = {"""Training Loss""": logs["""loss"""]}
# First column is necessarily Step sine we're not in epoch eval strategy
__lowerCAmelCase : Tuple = state.global_step
self.training_tracker.write_line(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : int=None , **lowerCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
if self.training_tracker is not None:
__lowerCAmelCase : Union[str, Any] = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history ):
if "loss" in log:
__lowerCAmelCase : int = log["""loss"""]
break
if self.first_column == "Epoch":
__lowerCAmelCase : int = int(state.epoch )
else:
__lowerCAmelCase : Optional[int] = state.global_step
__lowerCAmelCase : Union[str, Any] = """eval"""
for k in metrics:
if k.endswith("""_loss""" ):
__lowerCAmelCase : Dict = re.sub(r"""\_loss$""" , """""" , lowerCAmelCase )
__lowerCAmelCase : Tuple = metrics.pop("""total_flos""" , lowerCAmelCase )
__lowerCAmelCase : List[Any] = metrics.pop("""epoch""" , lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = metrics.pop(f'''{metric_key_prefix}_runtime''' , lowerCAmelCase )
__lowerCAmelCase : Tuple = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , lowerCAmelCase )
__lowerCAmelCase : List[Any] = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , lowerCAmelCase )
__lowerCAmelCase : Dict = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , lowerCAmelCase )
for k, v in metrics.items():
if k == f'''{metric_key_prefix}_loss''':
__lowerCAmelCase : Tuple = v
else:
__lowerCAmelCase : Any = k.split("""_""" )
__lowerCAmelCase : Optional[Any] = """ """.join([part.capitalize() for part in splits[1:]] )
__lowerCAmelCase : List[str] = v
self.training_tracker.write_line(lowerCAmelCase )
self.training_tracker.remove_child()
__lowerCAmelCase : int = None
# Evaluation takes a long time so we should force the next update.
__lowerCAmelCase : str = True
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , **lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = None
| 218
| 0
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _SCREAMING_SNAKE_CASE:
def __init__( self : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : int=True , UpperCamelCase_ : List[Any]=99 , UpperCamelCase_ : List[str]=64 , UpperCamelCase_ : List[str]=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : List[str]=37 , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Optional[Any]=5_12 , UpperCamelCase_ : Union[str, Any]=16 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[int] = parent
SCREAMING_SNAKE_CASE__ :Any = batch_size
SCREAMING_SNAKE_CASE__ :Tuple = seq_length
SCREAMING_SNAKE_CASE__ :Any = is_training
SCREAMING_SNAKE_CASE__ :int = use_input_mask
SCREAMING_SNAKE_CASE__ :Dict = use_token_type_ids
SCREAMING_SNAKE_CASE__ :str = use_labels
SCREAMING_SNAKE_CASE__ :str = vocab_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ :List[Any] = embedding_size
SCREAMING_SNAKE_CASE__ :int = num_hidden_layers
SCREAMING_SNAKE_CASE__ :int = num_attention_heads
SCREAMING_SNAKE_CASE__ :Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ :str = hidden_act
SCREAMING_SNAKE_CASE__ :Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ :Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ :Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ :Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE__ :Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ :int = num_labels
SCREAMING_SNAKE_CASE__ :Optional[Any] = num_choices
SCREAMING_SNAKE_CASE__ :List[Any] = scope
def __lowerCamelCase ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ :List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ :str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ :Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ :List[str] = None
SCREAMING_SNAKE_CASE__ :int = None
SCREAMING_SNAKE_CASE__ :Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ :Any = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ :str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : int ) -> int:
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[str] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ :str = MegatronBertForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Dict:
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertForNextSentencePrediction(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :List[str] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ :Tuple = MegatronBertForPreTraining(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , next_sentence_label=UpperCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ :int = MegatronBertForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Tuple = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ :List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ :Dict = self.num_labels
SCREAMING_SNAKE_CASE__ :Any = MegatronBertForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE__ :Tuple = MegatronBertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ :List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ :Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ :Dict = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) :Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ :Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ : List[Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : int = True
# test_resize_embeddings = False
A_ : Dict = False
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class in get_values(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ :str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def __lowerCamelCase ( self : Union[str, Any] ) -> Any:
SCREAMING_SNAKE_CASE__ :Dict = MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def __lowerCamelCase ( self : List[str] ) -> Optional[int]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*UpperCamelCase_ )
def __lowerCamelCase ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCamelCase_ )
def __lowerCamelCase ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCamelCase_ )
def __lowerCamelCase ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCamelCase_ )
def __lowerCamelCase ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCamelCase_ )
def __lowerCamelCase ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCamelCase_ )
def __lowerCamelCase ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCamelCase_ )
def __lowerCamelCase ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCamelCase_ )
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
return torch.tensor(
UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , )
UpperCamelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@slow
@unittest.skip('Model is not available.' )
def __lowerCamelCase ( self : str ) -> int:
SCREAMING_SNAKE_CASE__ :Tuple = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ :Dict = os.path.join(os.environ['MYDIR'] , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = MegatronBertModel.from_pretrained(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.half()
SCREAMING_SNAKE_CASE__ :Optional[int] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :str = model(UpperCamelCase_ )[0]
SCREAMING_SNAKE_CASE__ :Dict = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE__ :List[Any] = output[0, ii, jj]
SCREAMING_SNAKE_CASE__ :List[str] = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE__ :List[Any] = 'ii={} jj={} a={} b={}'.format(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(math.isclose(UpperCamelCase_ , UpperCamelCase_ , rel_tol=UpperCamelCase_ , abs_tol=UpperCamelCase_ ) , msg=UpperCamelCase_ )
| 209
|
'''simple docstring'''
import numpy as np
import qiskit
def lowerCamelCase ( UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : int | None = None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = np.random.default_rng(seed=UpperCAmelCase__ )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
SCREAMING_SNAKE_CASE__ :Optional[int] = 6 * key_len
# Measurement basis for Alice's qubits.
SCREAMING_SNAKE_CASE__ :Union[str, Any] = rng.integers(2 , size=UpperCAmelCase__ )
# The set of states Alice will prepare.
SCREAMING_SNAKE_CASE__ :List[Any] = rng.integers(2 , size=UpperCAmelCase__ )
# Measurement basis for Bob's qubits.
SCREAMING_SNAKE_CASE__ :str = rng.integers(2 , size=UpperCAmelCase__ )
# Quantum Circuit to simulate BB84
SCREAMING_SNAKE_CASE__ :int = qiskit.QuantumCircuit(UpperCAmelCase__ , name='BB84' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(UpperCAmelCase__ ):
if alice_state[index] == 1:
bbaa_circ.x(UpperCAmelCase__ )
if alice_basis[index] == 1:
bbaa_circ.h(UpperCAmelCase__ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(UpperCAmelCase__ ):
if bob_basis[index] == 1:
bbaa_circ.h(UpperCAmelCase__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
SCREAMING_SNAKE_CASE__ :str = qiskit.Aer.get_backend('aer_simulator' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
SCREAMING_SNAKE_CASE__ :int = qiskit.execute(UpperCAmelCase__ , UpperCAmelCase__ , shots=1 , seed_simulator=UpperCAmelCase__ )
# Returns the result of measurement.
SCREAMING_SNAKE_CASE__ :List[Any] = job.result().get_counts(UpperCAmelCase__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
SCREAMING_SNAKE_CASE__ :Any = ''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
SCREAMING_SNAKE_CASE__ :Optional[Any] = gen_key[:key_len] if len(UpperCAmelCase__ ) >= key_len else gen_key.ljust(UpperCAmelCase__ , '0' )
return key
if __name__ == "__main__":
print(f"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
testmod()
| 209
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a__ :
def __init__(self : Optional[Any], __UpperCAmelCase : Union[str, Any], __UpperCAmelCase : str=13, __UpperCAmelCase : List[Any]=10, __UpperCAmelCase : Any=3, __UpperCAmelCase : Dict=2, __UpperCAmelCase : int=2, __UpperCAmelCase : List[Any]=2, __UpperCAmelCase : Optional[int]=True, __UpperCAmelCase : Union[str, Any]=True, __UpperCAmelCase : int=32, __UpperCAmelCase : Union[str, Any]=5, __UpperCAmelCase : int=4, __UpperCAmelCase : Tuple=37, __UpperCAmelCase : Any="gelu", __UpperCAmelCase : Union[str, Any]=0.1, __UpperCAmelCase : int=0.1, __UpperCAmelCase : int=10, __UpperCAmelCase : Optional[Any]=0.02, __UpperCAmelCase : int=0.9, __UpperCAmelCase : Any=None, ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Tuple = tubelet_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_frames
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = mask_ratio
SCREAMING_SNAKE_CASE : Any = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
SCREAMING_SNAKE_CASE : List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : List[str] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
SCREAMING_SNAKE_CASE : Optional[int] = int(mask_ratio * self.seq_length )
def lowercase__ (self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ (self : str ) -> str:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__UpperCAmelCase, initializer_range=self.initializer_range, )
def lowercase__ (self : Dict, __UpperCAmelCase : int, __UpperCAmelCase : Tuple, __UpperCAmelCase : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = VideoMAEModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : Optional[int], __UpperCAmelCase : Optional[int], __UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = VideoMAEForPreTraining(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE : Tuple = torch.ones((self.num_masks,) )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE : Tuple = mask.expand(self.batch_size, -1 ).bool()
SCREAMING_SNAKE_CASE : Dict = model(__UpperCAmelCase, __UpperCAmelCase )
# model only returns predictions for masked patches
SCREAMING_SNAKE_CASE : Optional[Any] = mask.sum().item()
SCREAMING_SNAKE_CASE : List[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowercase__ (self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( _lowercase, _lowercase, unittest.TestCase ):
__magic_name__ : List[Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__magic_name__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__magic_name__ : Optional[int] = False
__magic_name__ : int = False
__magic_name__ : List[Any] = False
__magic_name__ : int = False
def lowercase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = VideoMAEModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=__UpperCAmelCase, has_text_modality=__UpperCAmelCase, hidden_size=37 )
def lowercase__ (self : Tuple, __UpperCAmelCase : Any, __UpperCAmelCase : Union[str, Any], __UpperCAmelCase : int=False ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(__UpperCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
SCREAMING_SNAKE_CASE : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE : Union[str, Any] = mask.expand(self.model_tester.batch_size, -1 ).bool()
SCREAMING_SNAKE_CASE : int = bool_masked_pos.to(__UpperCAmelCase )
if return_labels:
if model_class in [
*get_values(__UpperCAmelCase ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=__UpperCAmelCase )
return inputs_dict
def lowercase__ (self : str ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def lowercase__ (self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
def lowercase__ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase, nn.Linear ) )
def lowercase__ (self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __UpperCAmelCase )
def lowercase__ (self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
@slow
def lowercase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = VideoMAEModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowercase__ (self : Dict ) -> Optional[int]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(__UpperCAmelCase, __UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(__UpperCAmelCase, __UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
SCREAMING_SNAKE_CASE : Dict = len(__UpperCAmelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase, __UpperCAmelCase ) )
self.assertEqual(out_len + 1, len(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def lowercase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase : Tuple, __UpperCAmelCase : Optional[int], __UpperCAmelCase : Dict ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(__UpperCAmelCase, __UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.hidden_states
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__UpperCAmelCase ), __UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE : Any = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
check_hidden_states_output(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def __lowercase ():
SCREAMING_SNAKE_CASE : Union[str, Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
SCREAMING_SNAKE_CASE : Any = np.load(_SCREAMING_SNAKE_CASE )
return list(_SCREAMING_SNAKE_CASE )
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def lowercase__ (self : Tuple ) -> str:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
__UpperCAmelCase )
SCREAMING_SNAKE_CASE : str = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_video()
SCREAMING_SNAKE_CASE : int = image_processor(__UpperCAmelCase, return_tensors='''pt''' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**__UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, __UpperCAmelCase )
SCREAMING_SNAKE_CASE : str = torch.tensor([0.3669, -0.0688, -0.2421] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __UpperCAmelCase, atol=1e-4 ) )
@slow
def lowercase__ (self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_video()
SCREAMING_SNAKE_CASE : Tuple = image_processor(__UpperCAmelCase, return_tensors='''pt''' ).to(__UpperCAmelCase )
# add boolean mask, indicating which patches to mask
SCREAMING_SNAKE_CASE : int = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''' )
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**__UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([1, 1408, 1536] )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=__UpperCAmelCase )
self.assertEqual(outputs.logits.shape, __UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], __UpperCAmelCase, atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.5142], device=__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.loss, __UpperCAmelCase, atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
SCREAMING_SNAKE_CASE : Dict = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''', norm_pix_loss=__UpperCAmelCase ).to(
__UpperCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(torch.tensor([0.6469] ), device=__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.loss, __UpperCAmelCase, atol=1e-4 ) )
| 708
|
'''simple docstring'''
from __future__ import annotations
snake_case_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __lowercase (_SCREAMING_SNAKE_CASE :list[list[int]] , _SCREAMING_SNAKE_CASE :list[int] , _SCREAMING_SNAKE_CASE :list[int] , _SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :list[list[int]] , ):
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the reference grid
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the action grid
SCREAMING_SNAKE_CASE : Union[str, Any] = init[0]
SCREAMING_SNAKE_CASE : Optional[Any] = init[1]
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : List[Any] = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE : Union[str, Any] = [[f, g, x, y]]
SCREAMING_SNAKE_CASE : List[str] = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE : Any = False # flag set if we can't find expand
while not found and not resign:
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE : List[str] = cell.pop()
SCREAMING_SNAKE_CASE : List[str] = next_cell[2]
SCREAMING_SNAKE_CASE : Dict = next_cell[3]
SCREAMING_SNAKE_CASE : str = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE : Dict = True
else:
for i in range(len(_SCREAMING_SNAKE_CASE ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE : Optional[Any] = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = g + cost
SCREAMING_SNAKE_CASE : Any = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = i
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[Any] = goal[0]
SCREAMING_SNAKE_CASE : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE : str = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE : str = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE : Tuple = xa
SCREAMING_SNAKE_CASE : Dict = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] )
return path, action
if __name__ == "__main__":
snake_case_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
snake_case_ = [0, 0]
# all coordinates are given in format [y,x]
snake_case_ = [len(grid) - 1, len(grid[0]) - 1]
snake_case_ = 1
# the cost map which pushes the path closer to the goal
snake_case_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
snake_case_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
snake_case_ = 99
snake_case_ , snake_case_ = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 355
| 0
|
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( _lowercase ):
"""simple docstring"""
lowerCAmelCase = ["image_processor", "tokenizer"]
lowerCAmelCase = "AutoImageProcessor"
lowerCAmelCase = "AutoTokenizer"
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : List[str]=None , **SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , SCREAMING_SNAKE_CASE , )
lowerCAmelCase = kwargs.pop("feature_extractor" )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.image_processor
lowerCAmelCase = False
def __call__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowerCAmelCase = kwargs.pop("images" , SCREAMING_SNAKE_CASE )
lowerCAmelCase = kwargs.pop("text" , SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase = self.image_processor(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text is not None:
lowerCAmelCase = self.tokenizer(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase = encodings['''input_ids''']
return inputs
def __A ( self : Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __A ( self : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@contextmanager
def __A ( self : str ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer
yield
lowerCAmelCase = self.image_processor
lowerCAmelCase = False
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=None ) -> str:
"""simple docstring"""
if added_vocab is None:
lowerCAmelCase = self.tokenizer.get_added_vocab()
lowerCAmelCase = {}
while tokens:
lowerCAmelCase = re.search(R"<s_(.*?)>" , SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
lowerCAmelCase = start_token.group(1 )
lowerCAmelCase = re.search(Rf"</s_{key}>" , SCREAMING_SNAKE_CASE , re.IGNORECASE )
lowerCAmelCase = start_token.group()
if end_token is None:
lowerCAmelCase = tokens.replace(SCREAMING_SNAKE_CASE , "" )
else:
lowerCAmelCase = end_token.group()
lowerCAmelCase = re.escape(SCREAMING_SNAKE_CASE )
lowerCAmelCase = re.escape(SCREAMING_SNAKE_CASE )
lowerCAmelCase = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
lowerCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCAmelCase = self.tokenajson(SCREAMING_SNAKE_CASE , is_inner_value=SCREAMING_SNAKE_CASE , added_vocab=SCREAMING_SNAKE_CASE )
if value:
if len(SCREAMING_SNAKE_CASE ) == 1:
lowerCAmelCase = value[0]
lowerCAmelCase = value
else: # leaf nodes
lowerCAmelCase = []
for leaf in content.split(R"<sep/>" ):
lowerCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
lowerCAmelCase = output[key][0]
lowerCAmelCase = tokens[tokens.find(SCREAMING_SNAKE_CASE ) + len(SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=SCREAMING_SNAKE_CASE , added_vocab=SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __A ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __A ( self : Optional[int] ) -> str:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , SCREAMING_SNAKE_CASE , )
return self.image_processor
| 649
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __lowercase ( unittest.TestCase ):
def __init__(self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=True , A=1 / 2_5_5 , A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ : int = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowerCamelCase_ : Any = parent
lowerCamelCase_ : Tuple = batch_size
lowerCamelCase_ : Union[str, Any] = num_channels
lowerCamelCase_ : List[str] = min_resolution
lowerCamelCase_ : List[Any] = max_resolution
lowerCamelCase_ : Tuple = do_resize
lowerCamelCase_ : Dict = size
lowerCamelCase_ : Optional[int] = do_normalize
lowerCamelCase_ : Union[str, Any] = image_mean
lowerCamelCase_ : str = image_std
lowerCamelCase_ : List[Any] = do_rescale
lowerCamelCase_ : str = rescale_factor
lowerCamelCase_ : Optional[int] = do_pad
def UpperCAmelCase__ (self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ (self , A , A=False ):
if not batched:
lowerCamelCase_ : Any = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase_, lowerCamelCase_ : int = image.size
else:
lowerCamelCase_, lowerCamelCase_ : Any = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ : str = int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase_ : Optional[Any] = self.size['''shortest_edge''']
elif w > h:
lowerCamelCase_ : Union[str, Any] = self.size['''shortest_edge''']
lowerCamelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase_ : Any = self.size['''shortest_edge''']
lowerCamelCase_ : Tuple = self.size['''shortest_edge''']
else:
lowerCamelCase_ : Optional[Any] = []
for image in image_inputs:
lowerCamelCase_, lowerCamelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ : Dict = max(A , key=lambda A : item[0] )[0]
lowerCamelCase_ : int = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = DetaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = DetaImageProcessingTester(self )
@property
def UpperCAmelCase__ (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''do_rescale''' ) )
self.assertTrue(hasattr(A , '''do_pad''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Tuple = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : List[str] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : Optional[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ (self ):
# prepare image and target
lowerCamelCase_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Any = json.loads(f.read() )
lowerCamelCase_ : str = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCamelCase_ : Optional[Any] = DetaImageProcessor()
lowerCamelCase_ : Optional[int] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : List[str] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowerCamelCase_ : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def UpperCAmelCase__ (self ):
# prepare image, target and masks_path
lowerCamelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Tuple = json.loads(f.read() )
lowerCamelCase_ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCamelCase_ : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase_ : Any = DetaImageProcessor(format='''coco_panoptic''' )
lowerCamelCase_ : Dict = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowerCamelCase_ : Tuple = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowerCamelCase_ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 422
| 0
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCamelCase = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class lowercase_ (_UpperCAmelCase ):
A__ : int = VOCAB_FILES_NAMES
A__ : Any = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Dict = ['''input_ids''', '''attention_mask''']
A__ : Any = TaTokenizer
A__ : List[int] = []
def __init__( self , a_=None , a_=None , a_="</s>" , a_="<unk>" , a_="<pad>" , a_=1_0_0 , a_=None , **a_ , ) ->Any:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_a = [f'''<extra_id_{i}>''' for i in range(a_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_a = len(set(filter(lambda a_ : bool("extra_id_" in str(a_ ) ) , a_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
a_ , tokenizer_file=a_ , eos_token=a_ , unk_token=a_ , pad_token=a_ , extra_ids=a_ , additional_special_tokens=a_ , **a_ , )
_a = vocab_file
_a = False if not self.vocab_file else True
_a = extra_ids
@staticmethod
def lowerCamelCase__ ( a_ , a_ , a_ ) ->List[Any]:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_a = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a_ , )
return max_model_length
def lowerCamelCase__ ( self , a_ , a_ = None ) ->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def lowerCamelCase__ ( self , a_ , a_ = None ) ->List[int]:
'''simple docstring'''
_a = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_a = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowerCamelCase__ ( self , a_ , a_ = None ) ->List[int]:
'''simple docstring'''
_a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self ) ->str:
'''simple docstring'''
return list(
set(filter(lambda a_ : bool(re.search(R"<extra_id_\d+>" , a_ ) ) is not None , self.additional_special_tokens ) ) )
def lowerCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
return [self.convert_tokens_to_ids(a_ ) for token in self.get_sentinel_tokens()]
| 612
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase_ (_UpperCAmelCase ):
def __init__( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) ->Optional[Any]:
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=a_ , speech_processor=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , feature_extractor=a_ , )
def lowerCamelCase__ ( self , a_ = "auto" ) ->Optional[int]:
'''simple docstring'''
if slice_size == "auto":
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def lowerCamelCase__ ( self ) ->Any:
'''simple docstring'''
self.enable_attention_slicing(a_ )
@torch.no_grad()
def __call__( self , a_ , a_=1_6_0_0_0 , a_ = 5_1_2 , a_ = 5_1_2 , a_ = 5_0 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , **a_ , ) ->str:
'''simple docstring'''
_a = self.speech_processor.feature_extractor(
a_ , return_tensors="pt" , sampling_rate=a_ ).input_features.to(self.device )
_a = self.speech_model.generate(a_ , max_length=4_8_0_0_0_0 )
_a = self.speech_processor.tokenizer.batch_decode(a_ , skip_special_tokens=a_ , normalize=a_ )[
0
]
if isinstance(a_ , a_ ):
_a = 1
elif isinstance(a_ , a_ ):
_a = len(a_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(a_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a_ , a_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(a_ )}.''' )
# get prompt text embeddings
_a = self.tokenizer(
a_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_a = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_a = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_a = text_input_ids[:, : self.tokenizer.model_max_length]
_a = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_a , _a , _a = text_embeddings.shape
_a = text_embeddings.repeat(1 , a_ , 1 )
_a = text_embeddings.view(bs_embed * num_images_per_prompt , a_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = 42
if negative_prompt is None:
_a = [""] * batch_size
elif type(a_ ) is not type(a_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !='''
f''' {type(a_ )}.''' )
elif isinstance(a_ , a_ ):
_a = [negative_prompt]
elif batch_size != len(a_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
_a = negative_prompt
_a = text_input_ids.shape[-1]
_a = self.tokenizer(
a_ , padding="max_length" , max_length=a_ , truncation=a_ , return_tensors="pt" , )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_a = uncond_embeddings.shape[1]
_a = uncond_embeddings.repeat(1 , a_ , 1 )
_a = uncond_embeddings.view(batch_size * num_images_per_prompt , a_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_a = torch.randn(a_ , generator=a_ , device="cpu" , dtype=a_ ).to(
self.device )
else:
_a = torch.randn(a_ , generator=a_ , device=self.device , dtype=a_ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_a = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
for i, t in enumerate(self.progress_bar(a_ ) ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
_a = self.unet(a_ , a_ , encoder_hidden_states=a_ ).sample
# perform guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a_ , a_ , a_ )
_a = 1 / 0.18_215 * latents
_a = self.vae.decode(a_ ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_a = self.numpy_to_pil(a_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=a_ , nsfw_content_detected=a_ )
| 612
| 1
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
snake_case_ , snake_case_ , snake_case_ = False, False, False
@dataclass
class a__ :
__magic_name__ : Optional[int] = None
__magic_name__ : bool = True
__magic_name__ : bool = True
__magic_name__ : Optional[str] = None
# Automatically constructed
__magic_name__ : ClassVar[str] = "dict"
__magic_name__ : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__magic_name__ : str = field(default="Audio", init=_lowercase, repr=_lowercase )
def __call__(self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.pa_type
def lowercase__ (self : int, __UpperCAmelCase : Union[str, bytes, dict] ) -> dict:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(__UpperCAmelCase, __UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__UpperCAmelCase, __UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
SCREAMING_SNAKE_CASE : Optional[int] = BytesIO()
sf.write(__UpperCAmelCase, value['''array'''], value['''sampling_rate'''], format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
SCREAMING_SNAKE_CASE : int = np.frombuffer(value['''bytes'''], dtype=np.intaa ).astype(np.floataa ) / 32767
else:
SCREAMING_SNAKE_CASE : List[Any] = np.memmap(value['''path'''], dtype='''h''', mode='''r''' ).astype(np.floataa ) / 32767
SCREAMING_SNAKE_CASE : int = BytesIO(bytes() )
sf.write(__UpperCAmelCase, __UpperCAmelCase, value['''sampling_rate'''], format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def lowercase__ (self : Optional[int], __UpperCAmelCase : dict, __UpperCAmelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
SCREAMING_SNAKE_CASE : Optional[int] = xsplitext(__UpperCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
SCREAMING_SNAKE_CASE : List[str] = token_per_repo_id or {}
SCREAMING_SNAKE_CASE : Any = path.split('''::''' )[-1]
try:
SCREAMING_SNAKE_CASE : Any = string_to_dict(__UpperCAmelCase, config.HUB_DATASETS_URL )['''repo_id''']
SCREAMING_SNAKE_CASE : List[str] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
SCREAMING_SNAKE_CASE : Optional[int] = None
with xopen(__UpperCAmelCase, '''rb''', use_auth_token=__UpperCAmelCase ) as f:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sf.read(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = sf.read(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = array.T
if self.mono:
SCREAMING_SNAKE_CASE : str = librosa.to_mono(__UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
SCREAMING_SNAKE_CASE : Optional[Any] = librosa.resample(__UpperCAmelCase, orig_sr=__UpperCAmelCase, target_sr=self.sampling_rate )
SCREAMING_SNAKE_CASE : int = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowercase__ (self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def lowercase__ (self : Any, __UpperCAmelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE : Tuple = pa.array([None] * len(__UpperCAmelCase ), type=pa.binary() )
SCREAMING_SNAKE_CASE : List[str] = pa.StructArray.from_arrays([bytes_array, storage], ['''bytes''', '''path'''], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE : Optional[Any] = pa.array([None] * len(__UpperCAmelCase ), type=pa.string() )
SCREAMING_SNAKE_CASE : Tuple = pa.StructArray.from_arrays([storage, path_array], ['''bytes''', '''path'''], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
SCREAMING_SNAKE_CASE : Tuple = pa.array([Audio().encode_example(__UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
SCREAMING_SNAKE_CASE : Tuple = storage.field('''bytes''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = pa.array([None] * len(__UpperCAmelCase ), type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
SCREAMING_SNAKE_CASE : Tuple = storage.field('''path''' )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = pa.array([None] * len(__UpperCAmelCase ), type=pa.string() )
SCREAMING_SNAKE_CASE : Dict = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=storage.is_null() )
return array_cast(__UpperCAmelCase, self.pa_type )
def lowercase__ (self : List[str], __UpperCAmelCase : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(__UpperCAmelCase : Tuple ):
with xopen(__UpperCAmelCase, '''rb''' ) as f:
SCREAMING_SNAKE_CASE : int = f.read()
return bytes_
SCREAMING_SNAKE_CASE : List[str] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
SCREAMING_SNAKE_CASE : Any = pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()], type=pa.string(), )
SCREAMING_SNAKE_CASE : List[str] = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase, self.pa_type )
| 507
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __lowercase (_SCREAMING_SNAKE_CASE :List[str] ):
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def __lowercase (_SCREAMING_SNAKE_CASE :int ):
class a__ :
def __init__(self : Optional[int], __UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = metric_id
class a__ :
__magic_name__ : List[Any] = [MetricMock(_lowercase ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def lowercase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def __lowercase (_SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :int ):
if "tmp_path" in args:
SCREAMING_SNAKE_CASE : List[Any] = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(_SCREAMING_SNAKE_CASE , match='''https://huggingface.co/docs/evaluate''' ):
func(*_SCREAMING_SNAKE_CASE )
| 507
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowerCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
if config is None:
assert isinstance(self.model , lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
snake_case = self.model.config
else:
snake_case = config
snake_case = data_args
snake_case = self.config.tgt_vocab_size if isinstance(self.config , lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
' padding..' )
if self.args.label_smoothing == 0:
snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
snake_case = label_smoothed_nll_loss
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
if self.optimizer is None:
snake_case = ["bias", "LayerNorm.weight"]
snake_case = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
snake_case = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
snake_case = Adafactor
snake_case = {"scale_parameter": False, "relative_step": False}
else:
snake_case = AdamW
snake_case = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
snake_case = self.args.learning_rate
if self.sharded_ddp:
snake_case = OSS(
params=lowerCAmelCase , optim=lowerCAmelCase , **lowerCAmelCase , )
else:
snake_case = optimizer_cls(lowerCAmelCase , **lowerCAmelCase )
if self.lr_scheduler is None:
snake_case = self._get_lr_scheduler(lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
snake_case = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
snake_case = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowerCAmelCase )
return scheduler
def snake_case ( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
snake_case = model(**lowerCAmelCase , use_cache=lowerCAmelCase )[0]
snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
snake_case = model(**lowerCAmelCase , labels=lowerCAmelCase , use_cache=lowerCAmelCase )[:2]
else:
# compute label smoothed loss
snake_case = model(**lowerCAmelCase , use_cache=lowerCAmelCase )[0]
snake_case = torch.nn.functional.log_softmax(lowerCAmelCase , dim=-1 )
snake_case = self.loss_fn(lowerCAmelCase , lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = inputs.pop('labels' )
snake_case = self._compute_loss(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return loss
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , ):
"""simple docstring"""
snake_case = self._prepare_inputs(lowerCAmelCase )
snake_case = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
snake_case = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
snake_case = self._pad_tensors_to_max_len(lowerCAmelCase , gen_kwargs['max_length'] )
snake_case = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
snake_case = self._compute_loss(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
snake_case = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
snake_case = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
snake_case = self._pad_tensors_to_max_len(lowerCAmelCase , gen_kwargs['max_length'] )
return (loss, logits, labels)
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F""" padded to `max_length`={max_length}""" )
snake_case = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
snake_case = tensor
return padded_tensor
| 706
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 104
| 0
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Dict = '▁'
_a : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class UpperCamelCase_ ( __UpperCamelCase ,unittest.TestCase ):
"""simple docstring"""
A = BertGenerationTokenizer
A = False
A = True
def lowerCamelCase_ ( self ):
super().setUp()
__lowerCamelCase = BertGenerationTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self ):
__lowerCamelCase = """<s>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def lowerCamelCase_ ( self ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCAmelCase ) , 1_0_0_2 )
def lowerCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def lowerCamelCase_ ( self ):
__lowerCamelCase = BertGenerationTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCamelCase_ ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def lowerCamelCase_ ( self ):
__lowerCamelCase = """Hello World!"""
__lowerCamelCase = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def lowerCamelCase_ ( self ):
__lowerCamelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__lowerCamelCase = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@require_torch
@slow
def lowerCamelCase_ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__lowerCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
__lowerCamelCase = """ """.join(UpperCAmelCase )
__lowerCamelCase = self.big_tokenizer.encode_plus(UpperCAmelCase , return_tensors="""pt""" , return_token_type_ids=UpperCAmelCase )
__lowerCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=UpperCAmelCase )
__lowerCamelCase = BertGenerationConfig()
__lowerCamelCase = BertGenerationEncoder(UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase )
model(**UpperCAmelCase )
@slow
def lowerCamelCase_ ( self ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 479
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_a : int = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_a : Tuple = concatenate_datasets
_a : Optional[int] = DownloadConfig
_a : Any = DownloadManager
_a : Dict = DownloadMode
_a : str = DownloadConfig
_a : int = DownloadMode
_a : Tuple = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 479
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# Initialise PyTorch model
__magic_name__ : int =FunnelConfig.from_json_file(lowerCamelCase )
print(F"Building PyTorch model from configuration: {config}" )
__magic_name__ : str =FunnelBaseModel(lowerCamelCase ) if base_model else FunnelModel(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
UpperCAmelCase_ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 717
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self :List[str] , __snake_case :str , __snake_case :List[Any]=7 , __snake_case :List[str]=3 , __snake_case :int=30 , __snake_case :int=4_00 , __snake_case :List[str]=True , __snake_case :List[str]=None , __snake_case :Tuple=True , __snake_case :Any=[0.5, 0.5, 0.5] , __snake_case :Any=[0.5, 0.5, 0.5] , __snake_case :Union[str, Any]=True , __snake_case :str=1 / 2_55 , __snake_case :int=True , ):
'''simple docstring'''
__magic_name__ : Optional[int] =size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
__magic_name__ : str =parent
__magic_name__ : Tuple =batch_size
__magic_name__ : Optional[Any] =num_channels
__magic_name__ : Optional[int] =min_resolution
__magic_name__ : Optional[Any] =max_resolution
__magic_name__ : Optional[Any] =do_resize
__magic_name__ : Any =size
__magic_name__ : str =do_normalize
__magic_name__ : str =image_mean
__magic_name__ : List[Any] =image_std
__magic_name__ : Optional[int] =do_rescale
__magic_name__ : List[str] =rescale_factor
__magic_name__ : Optional[int] =do_pad
def A__ ( self :Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self :List[str] , __snake_case :Any , __snake_case :List[Any]=False ):
'''simple docstring'''
if not batched:
__magic_name__ : Any =image_inputs[0]
if isinstance(__snake_case , Image.Image ):
__magic_name__ , __magic_name__ : Dict =image.size
else:
__magic_name__ , __magic_name__ : Dict =image.shape[1], image.shape[2]
if w < h:
__magic_name__ : Any =int(self.size["""shortest_edge"""] * h / w )
__magic_name__ : Dict =self.size["""shortest_edge"""]
elif w > h:
__magic_name__ : Union[str, Any] =self.size["""shortest_edge"""]
__magic_name__ : Optional[int] =int(self.size["""shortest_edge"""] * w / h )
else:
__magic_name__ : Any =self.size["""shortest_edge"""]
__magic_name__ : List[str] =self.size["""shortest_edge"""]
else:
__magic_name__ : List[str] =[]
for image in image_inputs:
__magic_name__ , __magic_name__ : Tuple =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[int] =max(__snake_case , key=lambda __snake_case : item[0] )[0]
__magic_name__ : Tuple =max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = DetaImageProcessor if is_vision_available() else None
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ : int =DetaImageProcessingTester(self )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """image_mean""" ) )
self.assertTrue(hasattr(__snake_case , """image_std""" ) )
self.assertTrue(hasattr(__snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """do_rescale""" ) )
self.assertTrue(hasattr(__snake_case , """do_pad""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , __snake_case )
def A__ ( self :Any ):
'''simple docstring'''
pass
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : Tuple =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : str =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ , __magic_name__ : int =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
__magic_name__ : Dict =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__magic_name__ : str =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : int =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : List[str] =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : Any =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__magic_name__ : Tuple =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : int =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : Tuple =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__magic_name__ : Optional[int] =json.loads(f.read() )
__magic_name__ : str ={"""image_id""": 3_97_69, """annotations""": target}
# encode them
__magic_name__ : Optional[Any] =DetaImageProcessor()
__magic_name__ : Any =image_processing(images=__snake_case , annotations=__snake_case , return_tensors="""pt""" )
# verify pixel values
__magic_name__ : Tuple =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , __snake_case )
__magic_name__ : Optional[int] =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
__magic_name__ : Dict =torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __snake_case ) )
# verify boxes
__magic_name__ : Optional[int] =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __snake_case )
__magic_name__ : int =torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __snake_case , atol=1E-3 ) )
# verify image_id
__magic_name__ : Any =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __snake_case ) )
# verify is_crowd
__magic_name__ : Optional[Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __snake_case ) )
# verify class_labels
__magic_name__ : List[str] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __snake_case ) )
# verify orig_size
__magic_name__ : Optional[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __snake_case ) )
# verify size
__magic_name__ : Tuple =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __snake_case ) )
@slow
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__magic_name__ : str =json.loads(f.read() )
__magic_name__ : Optional[Any] ={"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
__magic_name__ : List[str] =pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__magic_name__ : int =DetaImageProcessor(format="""coco_panoptic""" )
__magic_name__ : Tuple =image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors="""pt""" )
# verify pixel values
__magic_name__ : Union[str, Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , __snake_case )
__magic_name__ : str =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
__magic_name__ : str =torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __snake_case ) )
# verify boxes
__magic_name__ : Dict =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __snake_case )
__magic_name__ : Optional[Any] =torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __snake_case , atol=1E-3 ) )
# verify image_id
__magic_name__ : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __snake_case ) )
# verify is_crowd
__magic_name__ : int =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __snake_case ) )
# verify class_labels
__magic_name__ : List[Any] =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __snake_case ) )
# verify masks
__magic_name__ : List[Any] =82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __snake_case )
# verify orig_size
__magic_name__ : Union[str, Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __snake_case ) )
# verify size
__magic_name__ : List[Any] =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __snake_case ) )
| 367
| 0
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> Any:
if is_torch_version("<", "2.0.0" ) or not hasattr(SCREAMING_SNAKE_CASE__, "_dynamo" ):
return False
return isinstance(SCREAMING_SNAKE_CASE__, torch._dynamo.eval_frame.OptimizedModule )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = True ) -> List[str]:
a_ : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
a_ : Tuple = is_compiled_module(SCREAMING_SNAKE_CASE__ )
if is_compiled:
a_ : Dict = model
a_ : Any = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
a_ : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
a_ : str = getattr(SCREAMING_SNAKE_CASE__, "forward" )
a_ : List[str] = model.__dict__.pop("_original_forward", SCREAMING_SNAKE_CASE__ )
if original_forward is not None:
while hasattr(SCREAMING_SNAKE_CASE__, "__wrapped__" ):
a_ : Any = forward.__wrapped__
if forward == original_forward:
break
a_ : Tuple = forward
if getattr(SCREAMING_SNAKE_CASE__, "_converted_to_transformer_engine", SCREAMING_SNAKE_CASE__ ):
convert_model(SCREAMING_SNAKE_CASE__, to_transformer_engine=SCREAMING_SNAKE_CASE__ )
if is_compiled:
a_ : Optional[int] = model
a_ : Optional[Any] = compiled_model
return model
def lowerCAmelCase_ ( ) -> List[str]:
PartialState().wait_for_everyone()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif PartialState().local_process_index == 0:
torch.save(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
@contextmanager
def lowerCAmelCase_ ( **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
for key, value in kwargs.items():
a_ : str = str(SCREAMING_SNAKE_CASE__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
if not hasattr(SCREAMING_SNAKE_CASE__, "__qualname__" ) and not hasattr(SCREAMING_SNAKE_CASE__, "__name__" ):
a_ : List[str] = getattr(SCREAMING_SNAKE_CASE__, "__class__", SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__, "__qualname__" ):
return obj.__qualname__
if hasattr(SCREAMING_SNAKE_CASE__, "__name__" ):
return obj.__name__
return str(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> str:
for key, value in source.items():
if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
a_ : Optional[int] = destination.setdefault(SCREAMING_SNAKE_CASE__, {} )
merge_dicts(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
else:
a_ : Union[str, Any] = value
return destination
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ = None ) -> Union[str, Any]:
if port is None:
a_ : Dict = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 237
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
# setable values
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
class lowerCAmelCase ( lowercase_ , lowercase_ ):
__lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__lowerCamelCase = 42
@property
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self :str , _lowercase :int = 10_00 , _lowercase :float = 0.0001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.floataa , ):
'''simple docstring'''
lowercase__ = dtype
def UpperCAmelCase ( self :str , _lowercase :Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
lowercase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ = jnp.array(1.0 , dtype=self.dtype )
lowercase__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :Optional[int] = None ):
'''simple docstring'''
return sample
def UpperCAmelCase ( self :List[str] , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :Tuple = () ):
'''simple docstring'''
lowercase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ = (jnp.arange(0 , _lowercase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Tuple , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :List[str]=None , _lowercase :Tuple=None ):
'''simple docstring'''
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ = jnp.clip(_lowercase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ = jnp.log(jnp.clip(_lowercase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowercase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ = variance
lowercase__ = state.common.betas[t]
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self :Optional[int] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :int , _lowercase :jnp.ndarray , _lowercase :Optional[jax.random.KeyArray] = None , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(_lowercase , sample.shape[1] , axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(_lowercase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(_lowercase , num=1 )
lowercase__ = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase ) ** 0.5) * noise
lowercase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase )
def UpperCAmelCase ( self :int , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :Dict , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase )
def __len__( self :List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 655
| 0
|
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
A_ : List[str] = str(id_ )
A_ : Union[str, Any] = None
A_ : List[Any] = None
A_ : str = []
A_ : str = {} # {vertex:distance}
def __lt__( self , lowercase ):
"""simple docstring"""
return self.key < other.key
def __repr__( self ):
"""simple docstring"""
return self.id
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
self.neighbors.append(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = weight
def UpperCamelCase ( __lowercase : Dict ,__lowercase : int ,__lowercase : int ,__lowercase : List[str] ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] ,__lowercase )
graph[b - 1].add_edge(graph[a - 1] ,__lowercase )
def UpperCamelCase ( __lowercase : list ,__lowercase : Vertex ):
'''simple docstring'''
A_ : str = []
for u in graph:
A_ : Optional[Any] = math.inf
A_ : List[str] = None
A_ : Any = 0
A_ : Any = graph[:]
while q:
A_ : Tuple = min(__lowercase )
q.remove(__lowercase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
A_ : int = u
A_ : Any = u.edges[v.id]
for i in range(1 ,len(__lowercase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCamelCase ( __lowercase : list ,__lowercase : Vertex ):
'''simple docstring'''
for u in graph:
A_ : Dict = math.inf
A_ : Optional[Any] = None
A_ : List[str] = 0
A_ : List[Any] = list(__lowercase )
hq.heapify(__lowercase )
while h:
A_ : str = hq.heappop(__lowercase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
A_ : Any = u
A_ : Tuple = u.edges[v.id]
hq.heapify(__lowercase )
for i in range(1 ,len(__lowercase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
| 70
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
a = logging.get_logger(__name__)
a = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : List[str] = 'bloom'
__UpperCamelCase : Optional[Any] = ['past_key_values']
__UpperCamelCase : Optional[int] = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self : Union[str, Any] ,lowerCamelCase : str=25_0880 ,lowerCamelCase : List[Any]=64 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : Tuple=8 ,lowerCamelCase : Union[str, Any]=1E-5 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : str=True ,lowerCamelCase : List[Any]=1 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Optional[int]=0.0 ,lowerCamelCase : List[Any]=0.0 ,lowerCamelCase : Optional[Any]=1 ,lowerCamelCase : Tuple=False ,**lowerCamelCase : str ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = vocab_size
# Backward compatibility with n_embed kwarg
__SCREAMING_SNAKE_CASE = kwargs.pop("""n_embed""" ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size if n_embed is None else n_embed
__SCREAMING_SNAKE_CASE = n_layer
__SCREAMING_SNAKE_CASE = n_head
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = pretraining_tp
__SCREAMING_SNAKE_CASE = apply_residual_connection_post_layernorm
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = bos_token_id
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = slow_but_exact
super().__init__(bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
class __a ( _snake_case ):
__UpperCamelCase : int = version.parse('1.12' )
def __init__( self : Optional[int] ,lowerCamelCase : PretrainedConfig ,lowerCamelCase : str = "default" ,lowerCamelCase : List[PatchingSpec] = None ,lowerCamelCase : bool = False ,):
'''simple docstring'''
super().__init__(lowerCamelCase ,task=lowerCamelCase ,patching_specs=lowerCamelCase ,use_past=lowerCamelCase )
if not getattr(self._config ,"""pad_token_id""" ,lowerCamelCase ):
# TODO: how to do that better?
__SCREAMING_SNAKE_CASE = 0
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowerCamelCase ,direction="""inputs""" ,inverted_values_shape=lowerCamelCase )
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._config.n_layer
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self._config.n_head
@property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return 1E-3
def UpperCAmelCase__ ( self : str ,lowerCamelCase : "PreTrainedTokenizer" ,lowerCamelCase : int = -1 ,lowerCamelCase : int = -1 ,lowerCamelCase : bool = False ,lowerCamelCase : Optional["TensorType"] = None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super(lowerCamelCase ,self ).generate_dummy_inputs(
lowerCamelCase ,batch_size=lowerCamelCase ,seq_length=lowerCamelCase ,is_pair=lowerCamelCase ,framework=lowerCamelCase )
# We need to order the input in the way they appears in the forward()
__SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE = seqlen + 2
__SCREAMING_SNAKE_CASE = self._config.hidden_size // self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__SCREAMING_SNAKE_CASE = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__SCREAMING_SNAKE_CASE = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers )
]
__SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""]
if self.use_past:
__SCREAMING_SNAKE_CASE = ordered_inputs["""attention_mask"""].dtype
__SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowerCamelCase ,lowerCamelCase ,dtype=lowerCamelCase )] ,dim=1 )
return ordered_inputs
@property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return 13
| 109
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = KandinskyInpaintPipeline
__A = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
__A = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__A = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def _a ( self ) -> str:
'''simple docstring'''
return 32
@property
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _a ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(_lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def _a ( self ) -> List[str]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Any:
'''simple docstring'''
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(_lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(_lowerCAmelCase )
else:
lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _a ( self ) -> str:
'''simple docstring'''
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _a ( self ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 588
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = SwinvaConfig()
__snake_case = swinva_name.split('''_''')
__snake_case = name_split[1]
if "to" in name_split[3]:
__snake_case = int(name_split[3][-3:])
else:
__snake_case = int(name_split[3])
if "to" in name_split[2]:
__snake_case = int(name_split[2][-2:])
else:
__snake_case = int(name_split[2][6:])
if model_size == "tiny":
__snake_case = 96
__snake_case = (2, 2, 6, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "small":
__snake_case = 96
__snake_case = (2, 2, 18, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "base":
__snake_case = 1_28
__snake_case = (2, 2, 18, 2)
__snake_case = (4, 8, 16, 32)
else:
__snake_case = 1_92
__snake_case = (2, 2, 18, 2)
__snake_case = (6, 12, 24, 48)
if "to" in swinva_name:
__snake_case = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__snake_case = 2_18_41
__snake_case = '''huggingface/label-files'''
__snake_case = '''imagenet-22k-id2label.json'''
__snake_case = json.load(open(hf_hub_download(snake_case, snake_case, repo_type='''dataset'''), '''r'''))
__snake_case = {int(snake_case): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
else:
__snake_case = 10_00
__snake_case = '''huggingface/label-files'''
__snake_case = '''imagenet-1k-id2label.json'''
__snake_case = json.load(open(hf_hub_download(snake_case, snake_case, repo_type='''dataset'''), '''r'''))
__snake_case = {int(snake_case): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = img_size
__snake_case = num_classes
__snake_case = embed_dim
__snake_case = depths
__snake_case = num_heads
__snake_case = window_size
return config
def SCREAMING_SNAKE_CASE ( snake_case):
if "patch_embed.proj" in name:
__snake_case = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
if "patch_embed.norm" in name:
__snake_case = name.replace('''patch_embed.norm''', '''embeddings.norm''')
if "layers" in name:
__snake_case = '''encoder.''' + name
if "attn.proj" in name:
__snake_case = name.replace('''attn.proj''', '''attention.output.dense''')
if "attn" in name:
__snake_case = name.replace('''attn''', '''attention.self''')
if "norm1" in name:
__snake_case = name.replace('''norm1''', '''layernorm_before''')
if "norm2" in name:
__snake_case = name.replace('''norm2''', '''layernorm_after''')
if "mlp.fc1" in name:
__snake_case = name.replace('''mlp.fc1''', '''intermediate.dense''')
if "mlp.fc2" in name:
__snake_case = name.replace('''mlp.fc2''', '''output.dense''')
if "q_bias" in name:
__snake_case = name.replace('''q_bias''', '''query.bias''')
if "k_bias" in name:
__snake_case = name.replace('''k_bias''', '''key.bias''')
if "v_bias" in name:
__snake_case = name.replace('''v_bias''', '''value.bias''')
if "cpb_mlp" in name:
__snake_case = name.replace('''cpb_mlp''', '''continuous_position_bias_mlp''')
if name == "norm.weight":
__snake_case = '''layernorm.weight'''
if name == "norm.bias":
__snake_case = '''layernorm.bias'''
if "head" in name:
__snake_case = name.replace('''head''', '''classifier''')
else:
__snake_case = '''swinv2.''' + name
return name
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case)
if "mask" in key:
continue
elif "qkv" in key:
__snake_case = key.split('''.''')
__snake_case = int(key_split[1])
__snake_case = int(key_split[3])
__snake_case = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[
dim : dim * 2
]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
__snake_case = timm.create_model(snake_case, pretrained=snake_case)
timm_model.eval()
__snake_case = get_swinva_config(snake_case)
__snake_case = SwinvaForImageClassification(snake_case)
model.eval()
__snake_case = convert_state_dict(timm_model.state_dict(), snake_case)
model.load_state_dict(snake_case)
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''', '''-''')))
__snake_case = Image.open(requests.get(snake_case, stream=snake_case).raw)
__snake_case = image_processor(images=snake_case, return_tensors='''pt''')
__snake_case = timm_model(inputs['''pixel_values'''])
__snake_case = model(**snake_case).logits
assert torch.allclose(snake_case, snake_case, atol=1E-3)
print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
model.save_pretrained(snake_case)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(snake_case)
model.push_to_hub(
repo_path_or_name=Path(snake_case, snake_case), organization='''nandwalritik''', commit_message='''Add model''', )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__lowercase : List[Any] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 705
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Tuple ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : int ) -> Tuple:
__snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
__snake_case = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('''sample_euler''' )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.manual_seed(0 )
__snake_case = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase ( self : Optional[Any] ) -> Tuple:
__snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('''sample_euler''' )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.manual_seed(0 )
__snake_case = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowercase ( self : List[str] ) -> Optional[Any]:
__snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , generator=A_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=A_ , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 93
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.