| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
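For context, `_LazyModule` defers the heavy submodule imports until an attribute is first accessed. A minimal usage sketch, assuming a `transformers` install with the torch and vision extras:

```python
# Importing the names is cheap; the torch-backed module only loads on first use.
from transformers import BridgeTowerConfig, BridgeTowerModel

config = BridgeTowerConfig()      # configuration import is lightweight
model = BridgeTowerModel(config)  # modeling_bridgetower is imported lazily here
```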
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Levit does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom of something being wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
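As a standalone illustration of the size arithmetic these tests rely on, here is a minimal sketch; the constants are the tester defaults above, and `levit_final_seq_length` is a hypothetical helper, not part of the test suite:

```python
from math import ceil, floor

def levit_final_seq_length(image_size, kernel_size=3, stride=2, padding=1):
    # The conv stem applies four stride-2 convolutions: h -> floor((h + 2p - k) / s + 1).
    h = w = image_size
    for _ in range(4):
        h = floor((h + 2 * padding - kernel_size) / stride + 1)
        w = floor((w + 2 * padding - kernel_size) / stride + 1)
    # The two "Subsample" stages each halve the resolution again, which is what the
    # ceil(h / 4) * ceil(w / 4) term in create_and_check_model's shape assertion captures.
    return ceil(h / 4) * ceil(w / 4)

print(levit_final_seq_length(64))  # 1 token remains for the 64px test images
```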
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
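For a quick sense of what these tests exercise, here is a minimal sketch of the feature extractor on synthetic audio, assuming `transformers` and `numpy` are installed (16 kHz is the Whisper default sampling rate):

```python
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()       # defaults: 80 mel bins, 30 s chunks
audio = np.random.randn(16000).astype(np.float32)   # 1 s of synthetic 16 kHz audio
features = feature_extractor(audio, sampling_rate=16000, return_tensors="np").input_features
print(features.shape)                               # (1, 80, 3000): padded to the 30 s window
```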
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our ViLT structure.
    """
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
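A hedged sketch of driving the converter from Python instead of the CLI; the checkpoint URL is the script's own default, and the output directory name is a placeholder:

```python
# Equivalent to the CLI invocation; the dump folder name is illustrative only.
convert_vilt_checkpoint(
    checkpoint_url="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
    pytorch_dump_folder_path="./vilt-b32-mlm-itm",
)
```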
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """`FeatureType` for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
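A minimal sketch of how the `Translation` feature is used when declaring a dataset schema (assuming the `datasets` library; the example sentences are illustrative):

```python
from datasets import Dataset, Features, Translation

features = Features({"translation": Translation(languages=["en", "fr", "de"])})
ds = Dataset.from_dict(
    {"translation": [{"en": "the cat", "fr": "le chat", "de": "die Katze"}]},
    features=features,
)
print(ds[0]["translation"]["fr"])  # "le chat"
```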
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the MinHashLSH index; cluster it with its closest existing duplicate, if any."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    """Find duplicate clusters in the dataset via MinHashLSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster so that every member is similar to at least one retained "extreme"."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over all clusters, sharing the dataset via a global to avoid pickling it."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset; return the filtered dataset and the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
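End to end, the module is meant to be driven through `deduplicate_dataset`. A minimal sketch, assuming a dataset with `content`, `repo_name`, and `path` columns (the dataset name below is only an example):

```python
from datasets import load_dataset

ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
print(len(ds), "->", len(ds_dedup))
```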
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block: self-attention, optional cross-attention, and a feed-forward layer,
    each preceded by its own normalization layer.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    f" calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""
    A feed-forward layer: a gated/activated projection in, dropout, and a linear projection out.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""
    GELU activation function, with tanh approximation support via `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""
    The approximate form of Gaussian Error Linear Unit (GELU).
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    r"""
    Norm layer modified to incorporate timestep embeddings.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    r"""
    Adaptive layer norm zero (adaLN-Zero) conditioned on timestep and class labels.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    r"""
    GroupNorm layer modified to incorporate timestep embeddings.
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
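A minimal smoke test of the block defined above, assuming a diffusers install that matches this file (shapes are illustrative):

```python
import torch
from diffusers.models.attention import BasicTransformerBlock

block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
hidden_states = torch.randn(2, 77, 64)  # (batch, sequence, dim)
out = block(hidden_states)              # self-attention only: no cross-attention configured
print(out.shape)                        # torch.Size([2, 77, 64])
```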
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
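# Worked example (illustrative): the merge table above rewrites
# "l o w e r" -> "lo w e r" -> "low e r" -> "low er</w>", so tokenizing
# "lower" yields ["low", "er</w>"], whose ids in the toy vocab are [14, 15].
def _bpe_walkthrough_demo():
    merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # mirror of the setUp table
    tokens = ["l", "o", "w", "e", "r</w>"]
    for a, b in merges:
        merged, i = [], 0
        while i < len(tokens):
            if i + 1 < len(tokens) and (tokens[i], tokens[i + 1]) == (a, b):
                merged.append(a + b)
                i += 2
            else:
                merged.append(tokens[i])
                i += 1
        tokens = merged
    return tokens  # -> ["low", "er</w>"]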
| 144
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels, id2label=id2label, label2id=label2id)
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
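# Illustrative sketch (not part of the conversion script): the (old, new)
# pairs returned above are applied to a checkpoint like this toy state dict.
def _rename_keys_demo():
    state_dict = {"decode_head.conv_seg.weight": 1, "decode_head.conv_seg.bias": 2}
    renames = [
        ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
        ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
    ]
    for old, new in renames:
        state_dict[new] = state_dict.pop(old)
    return state_dict  # keys now follow the Transformers naming scheme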
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A__ : str = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 144
| 1
|
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
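# Usage sketch (illustrative): the decorated function warns on every call
# while leaving its return value untouched.
@experimental
def _unstable_double(x):
    return x * 2
# _unstable_double(3) emits the UserWarning and returns 6.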
| 363
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 338
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2,
                 num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
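# Input-format sketch (illustrative): LiLT pairs each token id with a 4-vector
# bounding box (x0, y0, x1, y1), conventionally normalized to a 0-1000 page
# grid; the toy tensors above follow that (batch, seq_len, 4) layout.
def _lilt_inputs_demo():
    input_ids = torch.tensor([[1, 2]])                    # (batch, seq_len)
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])   # (batch, seq_len, 4)
    return input_ids.shape, bbox.shape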
| 32
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
def _lowercase ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : int ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : str ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineNightlyTests(unittest.TestCase):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _lowercase ( self : Dict ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowercase ( self : str ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
__lowercase = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" )
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
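# Pattern note (illustrative): these tests compare a small deterministic slice
# of the output image against hard-coded values rather than the whole array,
# keeping regressions detectable while tolerating minor numerical drift.
def _slice_close_enough(image, expected_slice, atol=2e-2):
    image_slice = image[0, 255:258, 383:386, -1]
    return float(np.abs(image_slice.flatten() - expected_slice).max()) < atol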
| 17
| 0
|
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) via merge sort; returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    p, inversions_p = count_inversions_recursive(p)
    q, inversions_q = count_inversions_recursive(q)
    r, cross_inversions = _count_cross_inversions(p, q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return r, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted arrays, counting pairs (p[i], q[j]) with p[i] > q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversions (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 170
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
snake_case : Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
snake_case : Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
snake_case : int = field(
default=1_0_2_4 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
snake_case : bool = field(
default=__a , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
snake_case : bool = field(
default=__a , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
snake_case : Optional[int] = field(
default=__a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
snake_case : Optional[int] = field(
default=__a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
snake_case : Optional[int] = field(
default=__a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """A csv or a json file containing the training data."""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """A csv or a json file containing the validation data."""} )
snake_case : Optional[str] = field(default=__a , metadata={"""help""": """A csv or a json file containing the test data."""} )
    def __post_init__(self):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
_UpperCAmelCase : List[str] = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase : Union[str, Any] = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
snake_case : str = field(
default=__a , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
snake_case : bool = field(
default=__a , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
snake_case : bool = field(
default=__a , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase : Dict = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
datasets.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_UpperCAmelCase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase : Dict = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase : int = data_args.train_file.split(""".""" )[-1]
_UpperCAmelCase : str = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase : Optional[Any] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
_UpperCAmelCase : List[str] = load_dataset("""csv""" , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase : List[str] = load_dataset("""json""" , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase : Optional[int] = raw_datasets["""train"""].features["""label"""].names
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase : Optional[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase_ , )
_UpperCAmelCase : str = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase : int = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase : List[str] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase : Dict = {"""Refused""": 0, """Entailed""": 1}
_UpperCAmelCase : List[Any] = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_UpperCAmelCase : str = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase_ ):
_UpperCAmelCase : int = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
_UpperCAmelCase : Union[str, Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_UpperCAmelCase : Tuple = examples["""statement"""]
_UpperCAmelCase : str = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
_UpperCAmelCase : Optional[int] = tokenizer(lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ )
_UpperCAmelCase : int = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
_UpperCAmelCase : str = raw_datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
_UpperCAmelCase : Dict = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
_UpperCAmelCase : List[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
_UpperCAmelCase : Optional[int] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Dict = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
_UpperCAmelCase : Tuple = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
_UpperCAmelCase : Tuple = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase_ ) ) , 3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase_ ):
_UpperCAmelCase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase_ ) else p.predictions
_UpperCAmelCase : int = np.argmax(lowerCAmelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase : Dict = default_data_collator
    elif training_args.fp16:
_UpperCAmelCase : List[str] = DataCollatorWithPadding(lowerCAmelCase_ , pad_to_multiple_of=8 )
else:
_UpperCAmelCase : Optional[Any] = None
# Initialize our Trainer
_UpperCAmelCase : Optional[Any] = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : Optional[Any] = last_checkpoint
_UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = train_result.metrics
_UpperCAmelCase : Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
)
_UpperCAmelCase : Optional[Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowerCAmelCase_ )
trainer.save_metrics("""train""" , lowerCAmelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase : Dict = trainer.evaluate(eval_dataset=lowerCAmelCase_ )
_UpperCAmelCase : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.log_metrics("""eval""" , lowerCAmelCase_ )
trainer.save_metrics("""eval""" , lowerCAmelCase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_UpperCAmelCase : str = predict_dataset.remove_columns("""label""" )
_UpperCAmelCase : List[Any] = trainer.predict(lowerCAmelCase_ , metric_key_prefix="""predict""" ).predictions
_UpperCAmelCase : Dict = np.argmax(lowerCAmelCase_ , axis=1 )
_UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = label_list[item]
writer.write(f"{index}\t{item}\n" )
_UpperCAmelCase : Union[str, Any] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
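# Format sketch (illustrative): TabFact serializes a table as '#'-delimited
# rows separated by newlines; the inner `_convert_table_text_to_pandas` helper
# above turns that string into a DataFrame, as this standalone demo shows.
def _table_text_demo():
    table_text = "year#city\n2020#tokyo\n2024#paris\n"
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])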
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 170
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 248
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    """Tokenize a raw text dump and binarize it to arrays of token ids."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).")
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    count = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
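# Round-trip sketch (illustrative, not part of the original script): reading
# the dump back returns the shuffled list of numpy token-id arrays.
def load_binarized(path):
    with open(path, "rb") as handle:
        return pickle.load(handle)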
if __name__ == "__main__":
main()
| 248
| 1
|
import argparse
import os
import re
import packaging.version
lowerCAmelCase_ = "examples/"
lowerCAmelCase_ = {
"examples": (re.compile(R'^check_min_version\(\"[^\"]+\"\)\s*$', re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R'^__version__\s+=\s+\"([^\"]+)\"\s*$', re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R'^(\s*)version\s*=\s*\"[^\"]+\",', re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(R'^(\s*)release\s*=\s*\"[^\"]+\"$', re.MULTILINE), "release = \"VERSION\"\n"),
}
lowerCAmelCase_ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowerCAmelCase_ = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the matching REPLACE_PATTERNS entry."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
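# Dry-run sketch (illustrative): the "init" pattern rewrites a version line in
# place, e.g. `__version__ = "4.30.0.dev0"` becomes `__version__ = "4.31.0"`.
def _version_regex_demo():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    code = '__version__ = "4.30.0.dev0"\n'
    return re_pattern.sub(replace.replace("VERSION", "4.31.0"), code)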
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowerCAmelCase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
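# Usage sketch: `--patch` and `--post_release` are the two flags the parser
# above defines; the utils/ location of the script is an assumption.
#
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # move the repo back to a .dev0 version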
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ = get_tests_dir('fixtures/vocab.json')
lowerCAmelCase_ = get_tests_dir('fixtures')
class _A ( unittest.TestCase ):
_UpperCamelCase : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Union[str, Any] = 0
def __a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_A , _A )
def __a ( self : Any ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str = WavaVecaConfig()
lowercase : Tuple = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_A )
processor.save_pretrained(_A )
lowercase : Optional[Any] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_A , os.path.join(_A , _A ) )
copyfile(_A , os.path.join(_A , '''vocab.json''' ) )
lowercase : Any = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[Any] = WavaVecaFeatureExtractor()
lowercase : Optional[int] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : Union[str, Any] = WavaVecaProcessor(_A , _A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in tokenizer
with open(os.path.join(_A , _A ) , '''r''' ) as f:
lowercase : Union[str, Any] = json.load(_A )
config_dict.pop('''processor_class''' )
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write(json.dumps(_A ) )
lowercase : int = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : str ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int = WavaVecaFeatureExtractor()
lowercase : Dict = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : Optional[int] = WavaVecaProcessor(_A , _A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in feature extractor
with open(os.path.join(_A , _A ) , '''r''' ) as f:
lowercase : int = json.load(_A )
config_dict.pop('''processor_class''' )
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write(json.dumps(_A ) )
lowercase : Any = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_A )
# copy relevant files
copyfile(_A , os.path.join(_A , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write('''{}''' )
lowercase : Tuple = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_A ):
lowercase : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
lowercase : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
lowercase : str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
lowercase : Union[str, Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
lowercase : Optional[int] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A , use_fast=_A )
lowercase : Any = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoProcessor.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoProcessor.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase : Optional[Any] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[Any] = os.path.join(_A , '''vocab.txt''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Optional[int] = CustomTokenizer(_A )
lowercase : Union[str, Any] = CustomProcessor(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_A )
lowercase : Dict = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __a ( self : str ) -> Any:
"""simple docstring"""
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = False
class _A ( _lowerCamelCase ):
_UpperCamelCase : Union[str, Any] = False
class _A ( _lowerCamelCase ):
_UpperCamelCase : Tuple = '''AutoFeatureExtractor'''
_UpperCamelCase : Any = '''AutoTokenizer'''
_UpperCamelCase : Any = False
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoProcessor.register(_A , _A )
# If remote code is not set, the default is to use local classes.
lowercase : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __a ( self : Any ) -> int:
"""simple docstring"""
lowercase : Any = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class _A ( unittest.TestCase ):
_UpperCamelCase : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __a ( cls : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase : str = TOKEN
HfFolder.save_token(_A )
@classmethod
def __a ( cls : List[Any] ) -> Dict:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A , '''test-processor''' ) , push_to_hub=_A , use_auth_token=self._token )
lowercase : Any = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(new_processor.feature_extractor , _A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __a ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : int = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A , '''test-processor-org''' ) , push_to_hub=_A , use_auth_token=self._token , organization='''valid_org''' , )
lowercase : Dict = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(new_processor.feature_extractor , _A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __a ( self : str ) -> Dict:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase : List[str] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Optional[int] = os.path.join(_A , '''vocab.txt''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Optional[Any] = CustomTokenizer(_A )
lowercase : Optional[Any] = CustomProcessor(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase : List[str] = Repository(_A , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(_A )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_A , '''tokenizer_config.json''' ) ) as f:
lowercase : Optional[Any] = json.load(_A )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_processing.py''' ) ) )
repo.push_to_hub()
lowercase : Tuple = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=_A )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
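# Note (hedged): these AutoProcessor tests can be run selectively with pytest's
# -k filter; the file path below is an assumption about where this module lives
# in the repository.
#
#   python -m pytest tests/models/auto/test_processor_auto.py -k "dynamic or register"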
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Dict = "roformer"
def __init__( self , a__=50_000 , a__=None , a__=768 , a__=12 , a__=12 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=1_536 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=0 , a__=False , a__=True , **a__ , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=a__ , **a__ )
snake_case_ = vocab_size
snake_case_ = hidden_size if embedding_size is None else embedding_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = rotary_value
snake_case_ = use_cache
class _snake_case ( lowercase_ ):
@property
def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ = {0: "batch", 1: "sequence"}
snake_case_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
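# Usage sketch (assumptions: the configuration class above is exported as
# RoFormerConfig and keeps the standard attribute names):
#
#   >>> config = RoFormerConfig(vocab_size=50_000)
#   >>> config.embedding_size == config.hidden_size  # defaults tie the two together
#   True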
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "upernet"
def __init__( self , a__=None , a__=512 , a__=0.0_2 , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=384 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**a__ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a__ , a__ ):
snake_case_ = backbone_config.get("model_type" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(a__ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
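# Usage sketch (assumption: the class above is exported as UperNetConfig).
# Leaving `backbone_config` unset falls back to the default ResNet backbone, as
# the __init__ above logs:
#
#   >>> config = UperNetConfig()
#   >>> config.backbone_config.model_type
#   'resnet'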
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Zeller's congruence: return which day of the week a date given as
    'mm-dd-yyyy' or 'mm/dd/yyyy' falls on.
    """
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
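# Example: 2010-01-31 fell on a Sunday, so the function returns
#
#   >>> zeller("01-31-2010")
#   'Your date 01-31-2010, is a Sunday!'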
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
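# Note (hedged): the terminal-summary hook above only writes report files when
# pytest is invoked with the flag it reads via getoption, e.g.:
#
#   python -m pytest --make-reports=tests_torch tests/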
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
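if __name__ == "__main__":
    # Demo sketch: adjacency matrix of a 5-vertex graph that contains the
    # Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # expected output: [0, 1, 2, 4, 3, 0]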
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
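# Sanity check (sketch): encrypting then decrypting with the same key must
# return the original message, since non-letter symbols pass through and the
# key index advances identically in both directions.
#
#   >>> decrypt_message("LION", encrypt_message("LION", "Meet me at dawn"))
#   'Meet me at dawn'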
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
UpperCamelCase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
for attribute in key.split("." ):
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
a_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
a_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
else:
a_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.feature_extractor
a_ = hf_model.adapter
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
a_ = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(UpperCAmelCase )[0].split("." )[-2]
a_ = mapped_key.replace("*" , UpperCAmelCase )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
a_ = "weight"
else:
a_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = full_name.split("adaptor." )[-1]
a_ = name.split("." )
if items[1].isdigit():
a_ = int(items[1] )
else:
a_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
a_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ , a_ = emb.weight.shape
a_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
a_ = emb.weight.data
return lin_layer
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->List[str]:
"""simple docstring"""
a_ = WavaVecaConfig.from_pretrained(
UpperCAmelCase , add_adapter=UpperCAmelCase , adapter_stride=UpperCAmelCase , adapter_kernel_size=UpperCAmelCase , use_auth_token=UpperCAmelCase , output_hidden_size=UpperCAmelCase , )
a_ = MBartConfig.from_pretrained(UpperCAmelCase )
# load model
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
a_ = model[0].eval()
# load feature extractor
a_ = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase , use_auth_token=UpperCAmelCase )
# set weights for wav2vec2 encoder
a_ = WavaVecaModel(UpperCAmelCase )
recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase )
# load decoder weights
a_ = MBartForCausalLM(UpperCAmelCase )
a_ , a_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
a_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase , decoder=UpperCAmelCase )
a_ = False
a_ = MBartaaTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
a_ = hf_wavavec.config.to_dict()
a_ = tokenizer.pad_token_id
a_ = tokenizer.bos_token_id
a_ = tokenizer.eos_token_id
a_ = "mbart50"
a_ = "wav2vec2"
a_ = tokenizer.eos_token_id
a_ = 250_004
a_ = tokenizer.eos_token_id
a_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase )
hf_wavavec.save_pretrained(UpperCAmelCase )
feature_extractor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
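# Usage sketch: every flag below is defined by the argument parser above; the
# script name and the checkpoint/dict/config paths are placeholders.
#
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50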
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
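# Usage note: instantiating YolosFeatureExtractor still works but emits the
# FutureWarning above; new code should construct YolosImageProcessor directly
# with the same arguments.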
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Union[str, Any] ={
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =[
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
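# Usage sketch: thanks to the _LazyModule indirection above, importing the
# package only registers names; the torch-backed modeling file is loaded the
# first time a symbol is actually touched.
#
#   >>> from transformers import FalconConfig
#   >>> FalconConfig().model_type
#   'falcon'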
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Union[str, Any] ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] =_LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = field(default=a_ , metadata={"help": "Whether to use SortishSampler or not."} )
a = field(
default=a_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
a = field(
default=a_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
a = field(
default=a_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
a = field(
default=a_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def A ( self : Tuple):
_A : Tuple = super().to_dict()
for k, v in d.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_A : int = v.to_dict()
return d
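# Usage sketch (assumption: the dataclass above is exported as
# Seq2SeqTrainingArguments with the field names implied by its metadata):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=64,
#       generation_num_beams=4,
#   )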
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A : Tuple = logging.get_logger(__name__)
A : Tuple = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = "longformer"
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[List[int], int] = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 30522 , SCREAMING_SNAKE_CASE : int = 768 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 3072 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : float = 1e-12 , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : List[Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : List[Any] = attention_window
_A : int = sep_token_id
_A : Tuple = bos_token_id
_A : Any = eos_token_id
_A : List[str] = vocab_size
_A : Any = hidden_size
_A : Optional[int] = num_hidden_layers
_A : int = num_attention_heads
_A : Dict = hidden_act
_A : List[Any] = intermediate_size
_A : int = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : int = max_position_embeddings
_A : Any = type_vocab_size
_A : Dict = initializer_range
_A : Any = layer_norm_eps
_A : List[Any] = onnx_export
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : "PretrainedConfig" , SCREAMING_SNAKE_CASE : str = "default" , SCREAMING_SNAKE_CASE : "List[PatchingSpec]" = None):
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Optional[Any] = True
@property
def A ( self : List[str]):
if self.task == "multiple-choice":
_A : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_A : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
])
@property
def A ( self : str):
_A : int = super().outputs
if self.task == "default":
_A : str = {0: 'batch'}
return outputs
@property
def A ( self : List[Any]):
return 1e-4
@property
def A ( self : Dict):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def A ( self : str , SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ):
_A : Union[str, Any] = super().generate_dummy_inputs(
preprocessor=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_A : Tuple = torch.zeros_like(inputs['input_ids'])
# make every second token global
_A : Dict = 1
return inputs
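# Usage sketch (assumptions: the classes above are exported as LongformerConfig
# / LongformerOnnxConfig). Note that generate_dummy_inputs above adds a
# global_attention_mask with every second token marked global.
#
#   config = LongformerConfig(attention_window=256)
#   onnx_config = LongformerOnnxConfig(config)
#   print(onnx_config.default_onnx_opset)  # 14, required by the tril operator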
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ : Optional[Any] = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Any = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_:Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = ["input_features", "is_longer"]
def __init__( self, lowerCamelCase__=64, lowerCamelCase__=4_8000, lowerCamelCase__=480, lowerCamelCase__=10, lowerCamelCase__=1024, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__ = 0, lowerCamelCase__ = 1_4000, lowerCamelCase__ = None, lowerCamelCase__ = "fusion", lowerCamelCase__ = "repeatpad", **lowerCamelCase__, ):
super().__init__(
feature_size=lowerCamelCase__, sampling_rate=lowerCamelCase__, padding_value=lowerCamelCase__, return_attention_mask=lowerCamelCase__, **lowerCamelCase__, )
A : Tuple = top_db
A : Dict = truncation
A : int = padding
A : Optional[Any] = fft_window_size
A : Optional[Any] = (fft_window_size >> 1) + 1
A : List[Any] = hop_length
A : Tuple = max_length_s
A : str = max_length_s * sampling_rate
A : List[str] = sampling_rate
A : Dict = frequency_min
A : str = frequency_max
A : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase__, min_frequency=lowerCamelCase__, max_frequency=lowerCamelCase__, sampling_rate=lowerCamelCase__, norm=lowerCamelCase__, mel_scale="""htk""", )
A : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase__, min_frequency=lowerCamelCase__, max_frequency=lowerCamelCase__, sampling_rate=lowerCamelCase__, norm="""slaney""", mel_scale="""slaney""", )
def _lowerCAmelCase ( self ):
A : Any = copy.deepcopy(self.__dict__ )
A : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Optional[Any] = spectrogram(
lowerCamelCase__, window_function(self.fft_window_size, """hann""" ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase__, log_mel="""dB""", )
return log_mel_spectrogram.T
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A : Any = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A : Union[str, Any] = [0]
# randomly choose index for each part
A : str = np.random.choice(ranges[0] )
A : str = np.random.choice(ranges[1] )
A : Tuple = np.random.choice(ranges[2] )
A : List[str] = mel[idx_front : idx_front + chunk_frames, :]
A : Union[str, Any] = mel[idx_middle : idx_middle + chunk_frames, :]
A : Any = mel[idx_back : idx_back + chunk_frames, :]
A : Any = torch.tensor(mel[None, None, :] )
A : Union[str, Any] = torch.nn.functional.interpolate(
lowerCamelCase__, size=[chunk_frames, 64], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : List[Any] = mel_shrink[0][0].numpy()
A : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A : int = len(lowerCamelCase__ ) - max_length
A : Optional[int] = np.random.randint(0, overflow + 1 )
A : List[Any] = waveform[idx : idx + max_length]
A : Optional[Any] = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A : List[str] = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters )
A : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A : str = np.stack([mel, mel, mel, mel], axis=0 )
A : Union[str, Any] = False
else:
A : Dict = self._random_mel_fusion(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
A : Tuple = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
A : int = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A : Optional[int] = int(max_length / len(lowerCamelCase__ ) )
A : Union[str, Any] = np.stack(np.tile(lowerCamelCase__, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A : Dict = int(max_length / len(lowerCamelCase__ ) )
A : Dict = np.stack(np.tile(lowerCamelCase__, lowerCamelCase__ ) )
A : Any = np.pad(lowerCamelCase__, (0, max_length - waveform.shape[0]), mode="""constant""", constant_values=0 )
if truncation == "fusion":
A : Union[str, Any] = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters )
A : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
A : List[str] = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters_slaney )[None, :]
return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation=None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "beit"
    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
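# A short usage sketch (illustrative, not part of the original module): an
# ONNX config like the one above is consumed by the transformers.onnx export
# utility, e.g.
#   python -m transformers.onnx --model=microsoft/beit-base-patch16-224 beit_onnx/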
def cramers_rule_2x2(equation1, equation2):
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients of a*x + b*y = c
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: both x and y are zero (consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
            return (x, y)
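# A minimal usage sketch (hypothetical values): with equations given as
# [a, b, c] for a*x + b*y = c, the system x + 2*y = 3 and 2*x + y = 3 has the
# unique solution x = 1, y = 1.
if __name__ == "__main__":
    solution = cramers_rule_2x2([1, 2, 3], [2, 1, 3])
    print(f"Solution (x, y): {solution}")  # expected: (1.0, 1.0)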
'''simple docstring'''
def exchange_sort(numbers):
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
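# Quick illustrative check (hypothetical input): the sort is in place and
# quadratic in the list length, e.g. exchange_sort([5, 1, 4, 2]) -> [1, 2, 4, 5].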
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
def is_pangram(input_str="The quick brown fox jumps over the lazy dog"):
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str="The quick brown fox jumps over the lazy dog"):
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str="The quick brown fox jumps over the lazy dog"):
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark():
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits=3):
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
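# Illustrative note: the circuit above acts on the all-zeros input state,
# whose QFT is a uniform superposition, so with 3 qubits the 10_000 shots
# should be spread roughly evenly (~1_250 counts each) over the 8 basis states.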
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    """simple docstring"""
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """simple docstring"""
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    """simple docstring"""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
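# Example invocation (illustrative; the script file name is assumed, the flag
# names come from parse_args above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --output_file_path bart.onnx --device cpu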
import math
import os
import sys
def read_file_binary(file_path):
    """Read the file at file_path and return its bytes as a '0'/'1' string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon, curr_string, index, last_match_id):
    """Split curr_string into its '0'- and '1'-extensions, widening codes at powers of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits):
    """Compress data_bits with the Lempel-Ziv-Welch algorithm and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path, compressed):
    """Prepend the original file length, encoded with a unary length prefix."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path, to_write):
    """Pack a '0'/'1' string into bytes (with end-marker padding) and write it."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path, destination_path):
    """Read source_path, LZW-compress it and write the result to destination_path."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
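# Example invocation (illustrative file names): reads example.txt as a bit
# string, runs it through the LZW pipeline above and writes the packed output:
#   python lzw_compress.py example.txt example.lzw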
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    '''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Return the prime factors of n in ascending order.
    >>> prime_factors(45)
    [3, 3, 5]
    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    '''simple docstring'''
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    '''simple docstring'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    '''simple docstring'''
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """simple docstring"""
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """simple docstring"""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers = 0,
        crop_overlap_ratio = 512 / 1500,
        points_per_crop = 32,
        crop_n_points_downscale_factor = 1,
    ):
        """simple docstring"""
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """simple docstring"""
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, padding = 0, bias = False, dilation = 1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales, in_channels, channels, align_corners) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
    def init_weights(self):
        self.apply(self._init_weights)
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index = 2, kernel_size = 3, dilation = 1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def init_weights(self):
        self.apply(self._init_weights)
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values = None,
        output_attentions = None,
        output_hidden_states = None,
        labels = None,
        return_dict = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps
        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
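# A short usage sketch (illustrative): semantic segmentation with the
# ConvNeXt-backed checkpoint referenced in the archive list above.
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height, width)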
"""simple docstring"""
import math
def jump_search(arr, x):
    """
    Jump search on a sorted list: scan forward in blocks of ~sqrt(n), then
    search linearly inside the block that may contain x. Returns the index
    of x, or -1 if it is absent.
    >>> jump_search([0, 1, 3, 5, 7, 9, 11], 9)
    5
    """
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)
    def __bool__(self):
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY):
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int):
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self):
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self):
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self):
        self._resize(len(self._buckets) * 2)
    def _size_down(self):
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL):
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL):
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self):
        return self._len
    def __iter__(self):
        yield from (item.key for item in self._buckets if item)
    def __repr__(self):
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
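# A short usage sketch (illustrative only): the class implements the standard
# MutableMapping protocol; deletions leave tombstones that probing skips over.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    hm["apple"] = 1
    hm["banana"] = 2
    del hm["apple"]
    print(len(hm), "banana" in hm)  # 1 True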
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        """simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
    @slow
    def test_resolver(self):
        """simple docstring"""
        self.resolver.convert_models(["heb-eng"])
    @slow
    def test_model_card(self):
        """simple docstring"""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_variance_type(self):
        """simple docstring"""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_clip_sample_range(self):
        """simple docstring"""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)
    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        """simple docstring"""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log')
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
    def test_variance_learned_range(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range')
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
    # The two overrides below were no-ops in the source; the method names are
    # an assumption based on the common scheduler test suite.
    def test_trained_betas(self):
        """simple docstring"""
        pass
    def test_add_noise_device(self):
        """simple docstring"""
        pass
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowerCamelCase : Dict = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
_lowerCamelCase : Any = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
_lowerCamelCase : Dict = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
_lowerCamelCase : List[str] = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
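# Hedged example (the TF key below is illustrative, not read from a real checkpoint):
# with DECODER_PATTERNS, "pegasus/decoder/layer_0/attention/self/query/kernel" renames
# step by step to "model.decoder.layers.0.self_attn.q_proj.weight".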
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
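    # Hedged usage sketch (script name and paths are placeholders):
    #     python convert_bigbird_pegasus_tf_to_pytorch.py \
    #         --tf_ckpt_path /path/to/tf_ckpt --save_dir ./bigbird_pegasus_converted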
| 249
| 0
|
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
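    # For reference (standard BERT-style layout): the single-sequence encoding is
    # [CLS] text [SEP] and the pair encoding is [CLS] text [SEP] text_2 [SEP],
    # which is exactly what the two asserts above verify via the special-token ids.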
| 155
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
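# Hedged example (the key below is illustrative): with the default rename_keys_prefix,
# "bert.bert.encoder.layer.0.output.dense.weight" becomes
# "visual_bert.encoder.layer.0.output.dense.weight" via the ("bert.bert", "visual_bert") pair.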
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 155
| 1
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
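# Hedged example (the key below is illustrative): "vqvae.bottleneck.level_blocks.0.k"
# ends with "k", so replace_key returns "vqvae.bottleneck.level_blocks.0.codebook".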
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
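# Hedged worked example (the key below is illustrative): "encoders.0.level_blocks.0.model.2.1.weight"
# matches re_encoder_block_conv_in with groups ("0", "0", "2", "1", "weight"), so
# block_index = 2 * 2 + 1 = 5 and the key becomes
# "encoders.0.level_blocks.0.downsample_block.5.weight".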
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
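    # Hedged usage sketch (downloads several multi-GB checkpoints on first run; script
    # name is assumed):
    #     python convert_jukebox.py --model_name jukebox-1b-lyrics \
    #         --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted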
| 3
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3
| 1
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30_000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
| 115
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 115
| 1
|
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the str version from the version tuple."""
    return ".".join(str(v) for v in version_tuple)
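# Hedged usage examples for the helpers above:
#     _str_to_version_tuple("1.18.3")   # -> (1, 18, 3)
#     _version_tuple_to_str((1, 18, 3)) # -> "1.18.3"
#     Version("1.0.0") < "2.0.0"        # True, via @total_ordering plus __lt__ and _validate_operand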
| 365
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of the dependency versions, using the exact same syntax used by pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
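# Hedged usage sketch: require_version accepts a pip-style specifier (optionally with
# multiple comma-separated ranges) plus an optional hint appended to the error message.
# Package names below are only examples:
#     require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")
#     require_version("python>=3.8", hint="This module needs a newer interpreter.")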
| 255
| 0
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
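# Note (added): after make_atom14_masks, protein["residx_atom14_to_atom37"] has shape
# [num_res, 14] and gathers the compact 14-atom layout back into the full 37-atom layout;
# the *_atom_exists tensors mask out slots that are not real atoms for each residue type.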
| 115
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
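# Illustrative invocation (script and data path are assumptions, not taken from the source):
#   python save_len_file.py t5-small /path/to/dataset_dir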
| 244
| 0
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
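# Design note (added): the character draws above use secrets.choice, which is backed by the
# OS CSPRNG, but shuffle() comes from the non-cryptographic random module, so the alternative
# generator is not fully CSPRNG-backed; secrets.SystemRandom().shuffle(...) would close that gap.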
| 36
|
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(
    first_solution, distance_of_first_solution, dict_of_neighbours, iters, size
):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
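# Data format note (added, inferred from generate_neighbours/generate_first_solution):
# each line of the input file reads "node_a node_b distance", e.g. "a b 20".
# Illustrative run (file name assumed): python tabu_search.py -f tabudata2.txt -i 4 -s 3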
| 36
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/speecht5_asr', revision='c5ef64c71905caeccde0e4462ef3f9077224c524', sequences=sequences, )
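# Observation (added): the char-level SentencePiece fixture carries no multi-digit pieces, so in
# test_full_tokenizer the token "92000" encodes to the unknown id (3) and decodes back as "<unk>".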
| 99
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"], )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
# assert values
if model_name == "upernet-swin-tiny":
__lowercase : Tuple = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
__lowercase : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
__lowercase : Optional[int] = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
__lowercase : Any = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1e-4 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
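# Illustrative invocation (script file name assumed, not taken from the source):
#   python convert_upernet_swin_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub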
| 249
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite requires a backward pass at inference time, and there is no
    # deterministic backward operator for some of the ops involved.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')

        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type='numpy', ).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 5e-1
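# Note (added): token_indices=[5, 7] are the CLIP token positions (BOS counts as 0) of
# "elephant" and "glasses" in the prompt; Attend-and-Excite maximizes the cross-attention
# mass on exactly those tokens during the max_iter_to_alter latent update steps.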
| 352
|
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
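# Note (added): bitonic sort only works when the number of inputs is a power of two;
# e.g. entering "12, 42, -21, 1" prints "-21, 1, 12, 42" for the ascending pass.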
| 246
| 0
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')

    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')

    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')

    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')

    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if F'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(F'failed converting {original_key} to {key}, does not match')

        # handle missmatched shape
        elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
            val = model_state_dict[F'{key_prefix}.{key}']
            print(F'{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}'):
            r = requests.get(F'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(F'{pytorch_dump_folder_path}/', exist_ok=True)
            open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}', 'wb').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}')['model']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else F'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(F'{pytorch_dump_folder_path}/mapping.json', 'w') as txtfile:
        json.dump(mapping, txtfile)

    print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
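# Illustrative invocation (script name assumed; defaults shown in the argparse setup above):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path jukebox-1b-lyrics-converted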
| 3
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url, params):
    """Return the citation number."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
        'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
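# Fragility note (added): this scrapes Google Scholar's rendered HTML, so the gs_ri/gs_fl
# class names and the position of the "Cited by" anchor (anchors[2]) can change without
# notice; treat the selector logic as best-effort rather than a stable API.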
| 3
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "bit"
_a = ["preactivation", "bottleneck"]
_a = ["SAME", "VALID"]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1024, 2048] , _a=[3, 4, 6, 3] , _a="preactivation" , _a="relu" , _a=None , _a=32 , _a=0.0 , _a=False , _a=32 , _a=1 , _a=None , _a=None , **_a , ) -> Optional[Any]:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_A : List[str] = global_padding.upper()
else:
raise ValueError(F'''Padding strategy {global_padding} not supported''' )
_A : Optional[int] = num_channels
_A : List[Any] = embedding_size
_A : List[Any] = hidden_sizes
_A : Any = depths
_A : Any = layer_type
_A : List[Any] = hidden_act
_A : int = global_padding
_A : List[str] = num_groups
_A : Any = drop_path_rate
_A : List[Any] = embedding_dynamic_padding
_A : Union[str, Any] = output_stride
_A : List[str] = width_factor
_A : Any = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A , _A : int = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
| 343
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Load checkpoint
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
_A : Any = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_A : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_A : Tuple = v
else:
_A : Dict = v
_A : Optional[Any] = chkpt["""params"""]
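# Drop tensor-valued entries so the remaining params can be serialized to JSON as the config.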
_A : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(snake_case_,(torch.FloatTensor, numpy.ndarray) )}
_A : str = chkpt["""dico_word2id"""]
_A : Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""","""""" ): i for s, i in vocab.items()}
# Save pytorch-model
_A : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A : Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 343
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase: List[Any] = logging.get_logger(__name__)
_UpperCamelCase: int = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 'megatron-bert'
def __init__( self : int, lowerCAmelCase : List[Any]=29056, lowerCAmelCase : int=1024, lowerCAmelCase : List[str]=24, lowerCAmelCase : Union[str, Any]=16, lowerCAmelCase : Union[str, Any]=4096, lowerCAmelCase : Dict="gelu", lowerCAmelCase : List[str]=0.1, lowerCAmelCase : Any=0.1, lowerCAmelCase : str=512, lowerCAmelCase : str=2, lowerCAmelCase : Any=0.02, lowerCAmelCase : Any=1e-12, lowerCAmelCase : List[str]=0, lowerCAmelCase : List[str]="absolute", lowerCAmelCase : Any=True, **lowerCAmelCase : Union[str, Any], ) -> Tuple:
super().__init__(pad_token_id=lowerCAmelCase, **lowerCAmelCase )
lowercase : Tuple = vocab_size
lowercase : Any = hidden_size
lowercase : int = num_hidden_layers
lowercase : Optional[int] = num_attention_heads
lowercase : Optional[int] = hidden_act
lowercase : Optional[int] = intermediate_size
lowercase : List[Any] = hidden_dropout_prob
lowercase : Union[str, Any] = attention_probs_dropout_prob
lowercase : Optional[int] = max_position_embeddings
lowercase : Optional[int] = type_vocab_size
lowercase : Any = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Optional[int] = position_embedding_type
lowercase : Optional[int] = use_cache
| 255
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = F'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_UpperCAmelCase, '''r''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = F'''class {class_name}('''
_UpperCamelCase = F'''{4 * " "}def {test_name}('''
_UpperCamelCase = F'''{8 * " "}{correct_line.split()[0]}'''
_UpperCamelCase = F'''{16 * " "}{correct_line.split()[0]}'''
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = []
for line in lines:
if line.startswith(_UpperCAmelCase ):
_UpperCamelCase = True
elif in_class and line.startswith(_UpperCAmelCase ):
_UpperCamelCase = True
elif in_class and in_func and (line.startswith(_UpperCAmelCase ) or line.startswith(_UpperCAmelCase )):
_UpperCamelCase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_UpperCamelCase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_UpperCamelCase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'''{spaces * " "}{correct_line}''' )
_UpperCamelCase = False
else:
new_lines.append(_UpperCAmelCase )
with open(_UpperCAmelCase, '''w''' ) as f:
for line in new_lines:
f.write(_UpperCAmelCase )
def lowerCamelCase__ ( __snake_case, __snake_case=None ) -> List[str]:
"""simple docstring"""
if fail is not None:
with open(_UpperCAmelCase, '''r''' ) as f:
_UpperCamelCase = {l.strip() for l in f.readlines()}
else:
_UpperCamelCase = None
with open(_UpperCAmelCase, '''r''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = defaultdict(_UpperCAmelCase )
for line in correct_lines:
_UpperCamelCase = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
_a = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 350
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase__ ( __snake_case ) -> None:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = analyze_text(__snake_case )
_UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
_UpperCamelCase = sum(single_char_strings.values() )
# one length string
_UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_UpperCamelCase = single_char_strings[ch]
_UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.log2(__snake_case ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_UpperCamelCase = sum(two_char_strings.values() )
_UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for chb in my_alphas:
_UpperCamelCase = cha + chb
if sequence in two_char_strings:
_UpperCamelCase = two_char_strings[sequence]
_UpperCamelCase = int(__snake_case ) / all_sum
my_sec_sum += prob * math.log2(__snake_case )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase__ ( __snake_case ) -> tuple[dict, dict]:
"""simple docstring"""
_UpperCamelCase = Counter() # type: ignore
_UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(__snake_case ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 100
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'efficientnet'
def __init__( self, __a = 3, __a = 600, __a = 2.0, __a = 3.1, __a = 8, __a = [3, 3, 5, 3, 5, 5, 3], __a = [32, 16, 24, 40, 80, 112, 192], __a = [16, 24, 40, 80, 112, 192, 320], __a = [], __a = [1, 2, 2, 2, 1, 2, 1], __a = [1, 2, 2, 3, 3, 4, 1], __a = [1, 6, 6, 6, 6, 6, 6], __a = 0.25, __a = "swish", __a = 2560, __a = "mean", __a = 0.02, __a = 0.001, __a = 0.99, __a = 0.5, __a = 0.2, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : Tuple = width_coefficient
_lowerCAmelCase : Dict = depth_coefficient
_lowerCAmelCase : Optional[int] = depth_divisor
_lowerCAmelCase : Optional[Any] = kernel_sizes
_lowerCAmelCase : Any = in_channels
_lowerCAmelCase : int = out_channels
_lowerCAmelCase : Union[str, Any] = depthwise_padding
_lowerCAmelCase : Union[str, Any] = strides
_lowerCAmelCase : int = num_block_repeats
_lowerCAmelCase : Union[str, Any] = expand_ratios
_lowerCAmelCase : Optional[Any] = squeeze_expansion_ratio
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dim
_lowerCAmelCase : str = pooling_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = batch_norm_eps
_lowerCAmelCase : Optional[Any] = batch_norm_momentum
_lowerCAmelCase : List[Any] = dropout_rate
_lowerCAmelCase : List[Any] = drop_connect_rate
_lowerCAmelCase : Optional[int] = sum(__a) * 4
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-5
| 36
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'vision-encoder-decoder'
lowerCamelCase__ = True
def __init__( self, **__a):
'''simple docstring'''
super().__init__(**__a)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because "
f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
_lowerCAmelCase : str = kwargs.pop("encoder")
_lowerCAmelCase : Any = encoder_config.pop("model_type")
_lowerCAmelCase : str = kwargs.pop("decoder")
_lowerCAmelCase : List[str] = decoder_config.pop("model_type")
_lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Optional[int] = True
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : str = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[str] = self.encoder.to_dict()
_lowerCAmelCase : List[str] = self.decoder.to_dict()
_lowerCAmelCase : Any = self.__class__.model_type
return output
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class UpperCAmelCase_ ( a):
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = OrderedDict()
_lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
'''simple docstring'''
import torch
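# Start from the text model's dummy inputs, then add zero-filled encoder hidden states of the matching shape.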
_lowerCAmelCase : Optional[Any] = OrderedDict()
_lowerCAmelCase : List[str] = super().generate_dummy_inputs(
__a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape
_lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
_lowerCAmelCase : List[str] = dummy_input.pop("input_ids")
_lowerCAmelCase : List[str] = dummy_input.pop("attention_mask")
_lowerCAmelCase : Optional[int] = torch.zeros(__a)
return common_inputs
class UpperCAmelCase_ ( a):
@property
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self, __a):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(__a)
def snake_case__ ( self, __a, __a, __a = "default"):
'''simple docstring'''
_lowerCAmelCase : Dict = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
| 36
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''swinv2'''
snake_case = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Optional[Any] , __UpperCAmelCase : Dict=224 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Optional[Any]=96 , __UpperCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __UpperCAmelCase : int=[3, 6, 12, 24] , __UpperCAmelCase : int=7 , __UpperCAmelCase : Dict=4.0 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[Any]=0.0 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : List[str]=1E-5 , __UpperCAmelCase : Any=32 , **__UpperCAmelCase : int , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
_A = image_size
_A = patch_size
_A = num_channels
_A = embed_dim
_A = depths
_A = len(__UpperCAmelCase )
_A = num_heads
_A = window_size
_A = mlp_ratio
_A = qkv_bias
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = drop_path_rate
_A = hidden_act
_A = use_absolute_embeddings
_A = layer_norm_eps
_A = initializer_range
_A = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_A = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
_A = (0, 0, 0, 0)
| 174
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def __lowercase ( __lowercase = 150_0000 ) -> int:
'''simple docstring'''
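# Euclid's formula: coprime m > n of opposite parity give primitive triples with perimeter 2*m*(m + n); count perimeters realizable exactly once.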
_A = defaultdict(__lowercase )
_A = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __lowercase , 2 ):
if gcd(__lowercase , __lowercase ) > 1:
continue
_A = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__lowercase , limit + 1 , __lowercase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174
| 1
|
"""simple docstring"""
def lowercase ( A_ , A_ )-> List[str]:
'''simple docstring'''
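# Russian peasant multiplication: whenever the low bit of b is set, add the running a; double a and halve b each round.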
a : str = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowercase ( A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
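# Same shift-and-add multiplication, but reduced modulo c at every addition to keep intermediates small.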
a : Dict = 0
while b > 0:
if b & 1:
a : List[Any] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 40
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase):
@property
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Dict = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def __snake_case ( self ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.dummy_uncond_unet
_UpperCAmelCase : Optional[Any] = KarrasVeScheduler()
_UpperCAmelCase : Optional[int] = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : str = pipe(num_inference_steps=2 , generator=_A , output_type="""numpy""" ).images
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=_A , output_type="""numpy""" , return_dict=_A )[0]
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = """google/ncsnpp-celebahq-256"""
_UpperCAmelCase : Union[str, Any] = UNet2DModel.from_pretrained(_A )
_UpperCAmelCase : Optional[int] = KarrasVeScheduler()
_UpperCAmelCase : Optional[int] = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCAmelCase : int = torch.manual_seed(0 )
_UpperCAmelCase : Tuple = pipe(num_inference_steps=20 , generator=_A , output_type="""numpy""" ).images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 246
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Any = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 225
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase : List[str] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["""CLIPFeatureExtractor"""]
lowercase : Union[str, Any] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 225
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ):
__lowerCAmelCase = """bit"""
__lowerCAmelCase = ["""preactivation""", """bottleneck"""]
__lowerCAmelCase = ["""SAME""", """VALID"""]
def __init__( self : Optional[int] , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Tuple=64 , lowerCamelCase_ : Union[str, Any]=[256, 512, 1024, 2048] , lowerCamelCase_ : str=[3, 4, 6, 3] , lowerCamelCase_ : str="preactivation" , lowerCamelCase_ : Dict="relu" , lowerCamelCase_ : Any=None , lowerCamelCase_ : Optional[Any]=32 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Any=32 , lowerCamelCase_ : int=1 , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=None , **lowerCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
UpperCamelCase = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
UpperCamelCase = num_channels
UpperCamelCase = embedding_size
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = layer_type
UpperCamelCase = hidden_act
UpperCamelCase = global_padding
UpperCamelCase = num_groups
UpperCamelCase = drop_path_rate
UpperCamelCase = embedding_dynamic_padding
UpperCamelCase = output_stride
UpperCamelCase = width_factor
UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
| 343
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = ShapEPipeline
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = ["""prompt"""]
__lowerCAmelCase = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase = False
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase = PriorTransformer(**lowerCamelCase_ )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase = ShapERenderer(**lowerCamelCase_ )
return model
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_prior
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = self.dummy_tokenizer
UpperCamelCase = self.dummy_renderer
UpperCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , )
UpperCamelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCamelCase = output.images[0]
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch_device == """cpu"""
UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase = batch_size * [inputs[key]]
UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = pipe(
"""a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 343
| 1
|
"""simple docstring"""
__UpperCamelCase = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355818,
}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
snake_case_ = (
f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
f'Valid values are: {", ".join(UpperCAmelCase )}'
)
raise ValueError(UpperCAmelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
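# Slice the image into non-overlapping patch_height x patch_width tiles with unfold, then reshape to (rows, columns, patch_dim).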
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
# maximize scale s.t.
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case_ = row_ids.to(torch.float32)
snake_case_ = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
if image.dtype == np.uint8:
snake_case_ = image.astype(np.float32)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
raise ValueError('data_format is not an accepted input as the outputs are always flattened patches.')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
snake_case_ = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
| 312
| 1
|
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__magic_name__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
__magic_name__ = []
__magic_name__ = []
__magic_name__ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
__magic_name__ = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"emoji": True,
},
}
]
__magic_name__ = 0
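# Walk every pytest JSON-lines log, recording each failed test id, its duration, and which log it came from.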
for log in Path().glob("*.log"):
__magic_name__ = 0
with open(log, "r") as f:
for line in f:
__magic_name__ = json.loads(line)
if line.get("nodeid", "") != "":
__magic_name__ = line["nodeid"]
if line.get("duration", None) is not None:
__magic_name__ = F"""{line["duration"]:.4f}"""
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__magic_name__ = []
log.unlink()
__magic_name__ = ""
__magic_name__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
__magic_name__ = []
__magic_name__ = {}
for test in failed_tests:
__magic_name__ = test[0].split("::")
__magic_name__ = data[0].split("/")[-1]
if data[0] not in filesafailed:
__magic_name__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__magic_name__ = [test[0] for test in failed_table]
__magic_name__ = list(set(files))
# Count number of instances in failed_tests
__magic_name__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__magic_name__ = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__magic_name__ = "Too many failed tests, please see the full report in the Action results."
__magic_name__ = len(err) + 10
__magic_name__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
__magic_name__ = "No failed tests! 🤗"
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
__magic_name__ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
__magic_name__ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
__magic_name__ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
__magic_name__ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__magic_name__ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__magic_name__ = row[0]
else:
__magic_name__ = ""
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 100
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[Any] = '''blip_2_vision_model'''
def __init__( self , lowerCAmelCase__=1_4_0_8 , lowerCAmelCase__=6_1_4_4 , lowerCAmelCase__=3_9 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1_4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0_00_01 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
super().__init__(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = qkv_bias
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__):
cls._set_token_in_kwargs(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__)
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""") == "blip-2":
__SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__)
class BlipaQFormerConfig(__a):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class BlipaConfig(__a):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
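# Illustrative usage sketch (not part of the original module; assumes
# `BlipaVisionConfig` and `CONFIG_MAPPING` are defined or imported earlier in
# this file): compose the three sub-configs into the full model config.
if __name__ == "__main__":
    vision_config = BlipaVisionConfig()
    qformer_config = BlipaQFormerConfig()
    text_config = CONFIG_MAPPING["opt"]()
    config = BlipaConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
    # The Q-Former cross-attends into vision features, so its encoder width is
    # tied to the vision hidden size in `__init__`.
    print(config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size)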
"""Convert a fairseq Wav2Vec2-Conformer checkpoint into the Hugging Face format."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Map every fairseq weight onto the HF model, tracking anything left over."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a fairseq convolutional feature-extractor weight into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model weights into the transformers design."""
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
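# Example invocation (paths and the script file name are hypothetical
# placeholders, shown only to illustrate the CLI wired up above):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --dict_path /path/to/dict.ltr.txt
#
# Pass --not_finetuned (and omit --dict_path) to convert a pretraining-only checkpoint.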
"""
Project Euler Problem 87: https://projecteuler.net/problem=87

Count the numbers below fifty million expressible as the sum of a prime square,
a prime cube, and a prime fourth power.
"""


def solution(limit: int = 50_000_000) -> int:
    """Return how many integers below `limit` equal p**2 + q**3 + r**4 for primes p, q, r."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f'{solution() = }')
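# Worked example: 28 = 2**2 + 2**3 + 2**4 is the smallest such number, so
# solution(29) == 1 while solution(28) == 0. The `limit - 16` early exit holds
# because 2**4 = 16 is the smallest possible prime fourth power.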
"""Boyer-Moore string search using the bad character heuristic."""
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the last index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the right-most mismatch at `current_pos`, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
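# For the demo above, "AB" matches "ABAABA" at indices 0 and 3, so this prints:
#   Pattern found in following positions:
#   [0, 3]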
"""Vector-quantized variational autoencoder (VQModel)."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (un-quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
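# Minimal round-trip sketch (illustrative, not part of the original module):
# encode an image-shaped tensor to latents and decode it back. Shapes assume the
# default config above (3-channel input, a single down/up block, so no spatial
# downsampling takes place).
if __name__ == "__main__":
    model = VQModel()
    x = torch.randn(1, 3, 32, 32)
    latents = model.encode(x).latents
    reconstruction = model.decode(latents).sample
    print(latents.shape, reconstruction.shape)  # both torch.Size([1, 3, 32, 32])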
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Given exactly one zero value among voltage, current and resistance,
    return the missing quantity as a name/value pair."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
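# Example (illustrative): ohms_law(voltage=10, resistance=5, current=0) returns
# {'current': 2.0}, i.e. I = V / R. With `doctest.testmod()` above, such
# examples could also live in the docstring as doctests.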
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether a digit tuple satisfies the substring divisibility tests."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
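# Worked example from the problem statement: 1406357289 is 0-to-9 pandigital and
# passes every substring test above (406 % 2 == 0, 63 % 3 == 0, 635 % 5 == 0,
# 357 % 7 == 0, 572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0).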
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
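# Illustrative usage sketch (the class name restored above follows the diffusers
# community "DDIM noise comparative analysis" pipeline this file matches; model
# id and image are placeholders):
#
#   import PIL.Image
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   image = PIL.Image.open("input.png")
#   images, timestep = pipe(image=image, strength=0.5, num_inference_steps=50, return_dict=False)
#
# `strength` controls how far into the noise schedule the input is pushed before
# being denoised back, so smaller values stay closer to the original image.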
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
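# Decoding-oriented usage sketch (illustrative): this tokenizer is typically
# paired with a speech model that emits token ids; without a merges file it can
# only decode, as enforced in `_tokenize` above.
#
#   tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#   text = tokenizer.decode(generated_ids)  # `generated_ids` come from the model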
"""Image processor: resize / center-crop / rescale / normalize, with semantic-segmentation post-processing."""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
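# Illustrative preprocessing sketch (the class name restored above is an
# assumption based on the 256/224 ImageNet-standard defaults; the dummy image is
# a placeholder):
if __name__ == "__main__":
    from PIL import Image

    processor = MobileNetV2ImageProcessor()
    dummy = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    batch = processor.preprocess(dummy)
    # shortest edge resized to 256, center-cropped to 224x224, rescaled, normalized
    print(batch["pixel_values"][0].shape)  # (3, 224, 224)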
"""Lazy import structure for the LXMERT model."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
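# Explanatory note: with the lazy module in place, importing this package is
# cheap; the torch/TF submodules listed in `_import_structure` are imported only
# on first attribute access, e.g.:
#
#   from transformers.models.lxmert import LxmertConfig  # no torch import
#   from transformers.models.lxmert import LxmertModel   # triggers modeling_lxmert import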
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A_ = {"input_ids": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A_,  # `A_` is the expected-encoding dict kept verbatim above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
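# Illustrative usage sketch ("google/owlvit-base-patch32" is a public OWL-ViT
# checkpoint; `image` is a placeholder PIL image):
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#
# Nested text lists are padded to the maximum number of queries per image before
# batching, as implemented in `__call__` above.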
"""A speech-to-text agent tool built on Whisper."""
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
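# Illustrative usage sketch (assumes the agents runtime supplies decoded audio;
# calling the tool directly requires a 1-D waveform array sampled at 16 kHz):
#
#   tool = SpeechToTextTool()
#   transcript = tool(waveform)
#
# PipelineTool wires encode -> forward -> decode, so a single call returns the text.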
"""fsspec filesystems that expose a single compressed file as a one-file archive."""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Dict = ""
lowercase__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowercase__ : str = None # compression type in fsspec. ex: "gzip"
lowercase__ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , lowerCamelCase_ = "" , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ ) -> Any:
super().__init__(self , **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCAmelCase__ = fsspec.open(
lowerCamelCase_ , mode='''rb''' , protocol=lowerCamelCase_ , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCAmelCase__ = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCAmelCase__ = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCAmelCase__ = None
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCamelCase_ ).lstrip('''/''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if self.dir_cache is None:
lowerCAmelCase__ = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCAmelCase__ = {f['''name''']: f}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]:
return self.file.open().read()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = "rb" , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> List[str]:
lowerCAmelCase__ = self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = "bz2"
lowercase__ : str = "bz2"
lowercase__ : Optional[int] = ".bz2"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = "gzip"
lowercase__ : int = "gzip"
lowercase__ : int = ".gz"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Tuple = "lz4"
lowercase__ : Optional[Any] = "lz4"
lowercase__ : int = ".lz4"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[int] = "xz"
lowercase__ : str = "xz"
lowercase__ : List[Any] = ".xz"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = "zstd"
lowercase__ : Union[str, Any] = "zstd"
lowercase__ : Dict = ".zst"
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = "rb" , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(
fo=lowerCamelCase_ , mode=lowerCamelCase_ , target_protocol=lowerCamelCase_ , target_options=lowerCamelCase_ , block_size=lowerCamelCase_ , **lowerCamelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
class a__ :
'''simple docstring'''
            def __init__( self , file_ ) -> List[str]:
                self._file = file_
def __enter__( self ) -> Tuple:
self._file.__enter__()
return self
def __exit__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> str:
self._file.__exit__(*lowerCamelCase_ , **lowerCamelCase_ )
def __iter__( self ) -> Any:
return iter(self._file )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return next(self._file )
def __getattr__( self , lowerCamelCase_ ) -> str:
return getattr(self._file , lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_ , **lowerCamelCase_ ):
return WrappedFile(_enter(*lowerCamelCase_ , **lowerCamelCase_ ) )
        self.file.__enter__ = fixed_enter
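# Usage sketch (illustrative, not part of the original module): once these filesystem
# classes are registered with fsspec, a compressed file can be read transparently through
# a chained URL such as the "gzip://file.txt::http://foo.bar/file.txt.gz" form mentioned
# above. The URL below is a made-up example.
import fsspec

def read_gzipped_text(url: str = "gzip://file.txt::https://example.com/file.txt.gz") -> str:
    # fsspec resolves the chain right-to-left: fetch over HTTPS, then decompress with gzip.
    with fsspec.open(url, "rt") as f:
        return f.read()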
| 228
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text( text , n=100 , character=" " ):
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents( documents ):
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["""title"""] , documents["""text"""] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else """""" )
                texts.append(passage )
return {"title": titles, "text": texts}
def embed( documents , ctx_encoder , ctx_tokenizer ):
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["""title"""] , documents["""text"""] , truncation=True , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args , processing_args , index_hnsw_args , ):
######################################
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("""embeddings""" , custom_index=index )
# And save the index
    index_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
    dataset.get_index("""embeddings""" ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
    question: Optional[str] = field(
        default=None , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
    rag_model_name: str = field(
        default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
    dpr_ctx_encoder_model_name: str = field(
        default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
            """help""": (
                """The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
                """ 'facebook/dpr-ctx_encoder-multiset-base'"""
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            """help""": """The number of processes to use to split the documents into passages. Default is single process."""
        } , )
    batch_size: int = field(
        default=16 , metadata={
            """help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
        } , )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
    m: int = field(
        default=128 , metadata={
            """help""": (
                """The number of bi-directional links created for every new element during the HNSW index construction."""
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
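# Usage sketch (illustrative, not part of the original script): reload the saved passages
# and FAISS index, then retrieve the closest passages for a question. The paths and the
# question-encoder checkpoint below are assumptions.
def query_knowledge_dataset(passages_path, index_path, question, k=5):
    from datasets import load_from_disk
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

    dataset = load_from_disk(passages_path)
    dataset.load_faiss_index("embeddings", index_path)
    q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # [0][0]: pooler output of the single question in the batch
    question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
    scores, passages = dataset.get_nearest_examples("embeddings", question_emb, k=k)
    return scores, passages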
| 164
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
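# Minimal sketch (illustrative) of the lazy-import pattern used above: attribute access on
# the module triggers the real import, so importing the package stays cheap. The class and
# names below are simplified stand-ins for transformers' internal _LazyModule.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, symbol):
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)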
| 164
| 1
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )

    def tokenize_function( examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn( examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == 'fp8') , )
    return train_dataloader, eval_dataloader
def training_function( config , args ):
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
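# Sketch (illustrative, not part of the original example): how the script above keeps the
# *effective* batch size constant when the requested batch does not fit on one GPU; the
# helper name is hypothetical and the constant mirrors MAX_GPU_BATCH_SIZE.
def effective_batch(requested: int, max_gpu: int = 16) -> tuple:
    """Return (per-step batch size, gradient accumulation steps)."""
    if requested > max_gpu:
        return max_gpu, requested // max_gpu
    return requested, 1

assert effective_batch(64) == (16, 4)  # 16 samples x 4 accumulated steps = 64 per update
assert effective_batch(16) == (16, 1)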
| 363
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence : list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )


def create_state_space_tree( sequence : list[Any] , current_subsequence : list[Any] , index : int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return

    # Branch 1: skip the element at `index`
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    # Branch 2: include it, recurse, then backtrack
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
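# Worked example (illustrative): for seq = ["A", "B"] the state-space tree above prints the
# 2**2 = 4 subsequences in exclude-first depth-first order:
#
#   []
#   ['B']
#   ['A']
#   ['A', 'B']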
| 117
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ) -> Optional[int]:
        size = size if size is not None else {'height': 2_0, 'width': 2_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
        self.patch_size = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6}
    def prepare_image_processor_dict( self ) -> Optional[Any]:
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ) -> Any:
        img_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''

    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp( self ) -> str:
        self.image_processor_tester = Pix2StructImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Any:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> List[Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processor , 'do_convert_rgb' ) )
    def test_expected_patches( self ) -> int:
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2_0_4_8
        inputs = image_processor(dummy_image , return_tensors='pt' , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) )
    def test_call_pil( self ) -> Union[str, Any]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='pt' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ : List[str] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
UpperCAmelCase_ : Any = 'Hello'
UpperCAmelCase_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Any = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ) -> Any:
# Initialize image_processor
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
UpperCAmelCase_ : List[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : int = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ) -> int:
# Initialize image_processor
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : List[str] = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''

    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp( self ) -> Union[str, Any]:
        self.image_processor_tester = Pix2StructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict( self ) -> Union[str, Any]:
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ) -> List[Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processor , 'do_convert_rgb' ) )
def __UpperCAmelCase ( self ) -> str:
# Initialize image_processor
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Any = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
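# Sketch (illustrative): how the expected_hidden_dim used throughout the tests above is
# derived. Pix2Struct flattens each image patch and prepends its (row, col) position, so
# every row of `flattened_patches` has patch_h * patch_w * channels values plus 2 ids.
def expected_flattened_patch_dim(patch_h: int = 16, patch_w: int = 16, channels: int = 3) -> int:
    return patch_h * patch_w * channels + 2

assert expected_flattened_patch_dim() == 770  # 16 * 16 * 3 + 2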
| 29
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self ):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self , **A ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )

    def get_input_output_texts(self , A ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
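# Sketch (illustrative, not the transformers implementation): the greedy longest-match-first
# WordPiece lookup that the toy vocabulary above exercises; unknown-token handling is
# simplified to the whole word.
def wordpiece_tokenize(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the '##' prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk_token]
        tokens.append(cur)
        start = end
    return tokens

# e.g. with the vocab above: wordpiece_tokenize("unwanted", vocab) -> ['un', '##want', '##ed']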
| 318
| 0
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model , hf_model ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    # load the pre-trained checkpoint into the original fairseq/unilm model
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["cfg"] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["model"] )
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
a__ : List[Any] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
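# Usage sketch (illustrative): invoking the conversion and loading the result. The script
# filename, checkpoint filename, and output directory below are made-up examples.
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base-converted
#
from transformers import WavLMModel

def load_converted(dump_dir="./wavlm-base-converted"):
    return WavLMModel.from_pretrained(dump_dir)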
| 355
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MT5Tokenizer, '''MT5TokenizerFast''': MT5TokenizerFast},
        module_spec=__spec__,
    )
| 195
| 0
|
import sys
def matrix_chain_order(array ) -> tuple:
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]

    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution , i , j ) -> None:
    '''simple docstring'''
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )
def main() -> None:
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )

    print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
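# Worked check (the classic CLRS example, matching the array above): for dimensions
# [30, 35, 15, 5, 10, 20, 25] the minimum number of scalar multiplications is 15125,
# achieved by the parenthesization (( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 )).
def check_example():
    matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert matrix[1][6] == 15125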
| 222
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowercase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ) -> List[Any]:
        """simple docstring"""
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples
    def run_pipeline_test( self , video_classifier , examples ) -> Optional[int]:
        """simple docstring"""
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {'score': ANY(float ), 'label': ANY(str )},
                    {'score': ANY(float ), 'label': ANY(str )},
                ] , )
@require_torch
    def test_small_model_pt( self ) -> Optional[Any]:
        """simple docstring"""
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
        video_classifier = pipeline(
            'video-classification' , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        outputs = video_classifier(video_file_path , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )

        outputs = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] , )
@require_tf
    def test_small_model_tf( self ) -> Tuple:
"""simple docstring"""
pass
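# Usage sketch (illustrative, not part of the test suite): the same pipeline outside of
# unittest; the checkpoint below is the tiny test model already used above.
from transformers import pipeline as hf_pipeline

def classify_video(path):
    clf = hf_pipeline("video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification")
    return clf(path, top_k=2)  # -> [{'score': ..., 'label': ...}, {'score': ..., 'label': ...}]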
| 222
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
a = True
if hasattr(__magic_name__ , """use_cache""" ):
a = True
a = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
a = getattr(self.model_tester , """key_length""" , __magic_name__ )
for model_class in self.all_model_classes:
a = self._prepare_for_class(__magic_name__ , __magic_name__ )
a = model_class(__magic_name__ )
a = len(model(__magic_name__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ , saved_model=__magic_name__ )
a = os.path.join(__magic_name__ , """saved_model""" , """1""" )
a = tf.keras.models.load_model(__magic_name__ )
a = model(__magic_name__ )
if self.is_encoder_decoder:
a = outputs["""encoder_hidden_states"""]
a = outputs["""encoder_attentions"""]
else:
a = outputs["""hidden_states"""]
a = outputs["""attentions"""]
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
a = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(model )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
a = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
a = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
a = getattr(self.model_tester , """key_length""" , __magic_name__ )
a = getattr(self.model_tester , """key_length""" , __magic_name__ )
def check_decoder_attentions_output(__magic_name__ :Dict ):
a = len(__magic_name__ )
self.assertEqual(out_len % 2 , 0 )
a = outputs.decoder_attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__magic_name__ :Tuple ):
a = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
a = True
a = False
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
a = len(__magic_name__ )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
if self.is_encoder_decoder:
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_decoder_attentions_output(__magic_name__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a = True
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__magic_name__ ) )
self.assertEqual(model.config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 350
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self ):
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
return splits
    def _cast_table( self , pa_table: pa.Table ):
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
    def _generate_tables(self , files ):
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , """rb""" ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F'Batch of {len(batch )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                    raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise ValueError(
                                    F'Not able to read records in the JSON file at {file}. '
                                    F'You should probably indicate the field of the JSON file containing your records. '
                                    F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
                                    F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
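# Illustrative use of the builder above through the public API (a minimal sketch;
# the file names are placeholders, `field` and `chunksize` map to the JsonConfig
# attributes defined above):
# import datasets
# ds = datasets.load_dataset("json", data_files="data.jsonl", chunksize=10 << 20)
# ds_nested = datasets.load_dataset("json", data_files="data.json", field="records")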
| 347
| 0
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model , hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name , value , adapter , unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*" , layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name , value , adapter , unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                adapter.proj_layer_norm.weight.data = value
                logger.info(f"""Adapter proj layer norm weight was initialized from {full_name}.""")
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
                adapter.proj.bias.data = value
                logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
                adapter.proj.weight.data = value
                logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""")
    elif isinstance(layer_id , int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"""Adapter layer {layer_id} weight was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
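# Intended use of the helper above (a sketch; `hf_decoder` is hypothetical here):
# it ties a token-embedding matrix into a bias-free Linear lm_head so both share weights.
# lm_head = make_linear_from_emb(hf_decoder.model.decoder.embed_tokens)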
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder , hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False)
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 25_00_04
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
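# Example invocation (a sketch only; the script name and all paths are placeholders,
# the flags mirror the argparse definitions above):
# python convert_wav2vec2_mbart_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --pytorch_dump_folder_path ./wav2vec2-mbart50 \
#     --dict_path /path/to/dict.txt \
#     --config_yaml_path /path/to/config.yaml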
| 128
|
import math
from datetime import datetime, timedelta
def gauss_easter( year ) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        tense = "will be" if year > datetime.now().year else "was"
        print(F'Easter in {year} {tense} {gauss_easter(year)}')
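# Quick sanity check of the function above (Western Easter 2023 fell on April 9):
# assert gauss_easter(2023) == datetime(2023, 4, 9)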
| 228
| 0
|
import torch
from diffusers import DiffusionPipeline
class lowercase ( DiffusionPipeline ):
    '''simple docstring'''

    def __init__(self , unet , scheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__(self ) -> torch.Tensor:
        """simple docstring"""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # ones_like cancels the scheduler output, so the pipeline deterministically returns ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
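# Minimal usage sketch (assumes a tiny UNet2DModel and DDPMScheduler purely for
# illustration; any unet/scheduler pair with compatible configs would do):
# from diffusers import DDPMScheduler, UNet2DModel
# unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
# pipe = lowercase(unet=unet, scheduler=DDPMScheduler())
# out = pipe()  # a tensor of ones shaped like the initial noise sample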
| 353
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> str:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = SwinvaForMaskedImageModeling(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ (self , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = SwinvaForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = SwinvaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a , embed_dim=37 )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
UpperCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = config.window_size**2
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase__ = len(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
UpperCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape
UpperCAmelCase__ = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , __a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = SwinvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = _config_zero_init(__a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__a )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__a )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase__ = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
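# Standalone re-run of the integration check above (a sketch; requires network access,
# names are taken from this file's imports, the image path is a placeholder):
# processor = AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
# model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
# inputs = processor(images=Image.open('cat.png'), return_tensors='pt')
# logits = model(**inputs).logits  # shape (1, 1000)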
| 335
| 0
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__A = ["text", "image", "audio"]
def create_inputs( input_types: List ):
    """simple docstring"""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""" )
    return inputs
def output_types( outputs: List ):
    """simple docstring"""
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(F"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs"))
self.assertTrue(hasattr(self.tool , "outputs"))
lowerCamelCase__: List[Any] =self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase_):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
lowerCamelCase__: List[str] =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =create_inputs(self.tool.inputs)
lowerCamelCase__: Tuple =self.tool(*UpperCAmelCase_)
# There is a single output
if len(self.tool.outputs) == 1:
lowerCamelCase__: Tuple =[outputs]
self.assertListEqual(output_types(UpperCAmelCase_) , self.tool.outputs)
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description"))
self.assertTrue(hasattr(self.tool , "default_checkpoint"))
self.assertTrue(self.tool.description.startswith("This is a tool that"))
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =create_inputs(self.tool.inputs)
lowerCamelCase__: Tuple =self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Optional[int] =[outputs]
self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
for output, output_type in zip(UpperCAmelCase_ , self.tool.outputs):
lowerCamelCase__: Optional[Any] =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =create_inputs(self.tool.inputs)
lowerCamelCase__: Union[str, Any] =[]
for _input, input_type in zip(UpperCAmelCase_ , self.tool.inputs):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
lowerCamelCase__: Optional[int] =self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: int =[outputs]
self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
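# How this mixin is typically consumed (a sketch; `load_tool` and the tool name are
# illustrative and not part of this file):
# from transformers import load_tool
# class TranslationToolTest(_SCREAMING_SNAKE_CASE, unittest.TestCase):
#     def setUp(self):
#         self.tool = load_tool("translation")
#         self.tool.setup()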
| 10
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
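# Typically run from a scheduled CI job (a sketch; the script name is a placeholder
# and the token must grant access to the target repository):
# GITHUB_TOKEN=<token> python stale.py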
| 117
| 0
|
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    '''simple docstring'''
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
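# Example invocation (a sketch; paths are placeholders and the script name is assumed):
# python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#     --gpt2_checkpoint_path /path/to/tf_checkpoint \
#     --pytorch_dump_folder_path ./gpt2-pytorch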
| 367
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 10_24,
    'moussaKam/barthez': 10_24,
    'moussaKam/barthez-orangesum-title': 10_24,
}
SPIECE_UNDERLINE = '▁'
class _A ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model )

    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token: str ) -> int:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token( self , index: int ) -> str:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def __getstate__( self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
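# Usage sketch (assuming this class corresponds to transformers' public BarthezTokenizer;
# the checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above):
# from transformers import BarthezTokenizer
# tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
# tok("Transformers est génial !")["input_ids"]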
| 116
| 0
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
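# The generated bindings are consumed like any protobuf module (a sketch;
# "tokenizer.model" is a placeholder sentencepiece model file):
# m = ModelProto()
# m.ParseFromString(open("tokenizer.model", "rb").read())
# print(m.trainer_spec.model_type, len(m.pieces))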
| 67
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results( test_results ):
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
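# Example of the parser above on a pytest summary line (format assumed):
# handle_test_results("1 failed, 2 passed in 1.05s") -> (1, 2, "1.05s")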
def extract_first_line_failure( failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case ):
lowercase = title
lowercase = doc_test_results['time_spent'].split(',' )[0]
lowercase = doc_test_results['success']
lowercase = doc_test_results['failures']
lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowercase = doc_test_results
@property
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [self._time_spent]
lowercase = 0
for time in time_spent:
lowercase = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(snake_case ) == 1:
lowercase = [0, 0, time_parts[0]]
lowercase , lowercase , lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
lowercase , lowercase , lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'''{int(snake_case )}h{int(snake_case )}m{int(snake_case )}s'''
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 40
lowercase = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(snake_case , snake_case )}
lowercase = ''
for category, failures in category_failures.items():
if len(snake_case ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(snake_case )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(snake_case )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( ):
lowercase = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(snake_case )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=snake_case , )
def SCREAMING_SNAKE_CASE__ ( self ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowercase = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
lowercase = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=snake_case , )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''

        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}

        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')

        job_link = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                text = F'''*Num failures* :{len(job_result['failed'])} \n'''
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))

                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'],
                    text=F'''Results for {job}''',
                    blocks=blocks,
                    thread_ts=self.thread_ts['ts'],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''').json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})

        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e)

    return {}
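# Worked example of the pagination above (hedged: the counts are hypothetical).
# The first request returns up to 100 jobs; for result["total_count"] == 250,
# math.ceil((250 - 100) / 100) == 2, so two follow-up requests are made with
# &page=2 and &page=3 before the {job name -> job URL} mapping is returned.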
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding='utf-8') as f:
                    _artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(name, file)}.''') from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({'name': self.name, 'path': path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 195
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [F"""This example is {label}""" for label in labels],
            return_tensors='pt',
            padding='max_length',
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
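# A minimal usage sketch (hedged: illustrative only; the input text and label
# list are hypothetical). The tool frames zero-shot classification as NLI:
# each candidate label is scored via the hypothesis "This example is {label}".
#
#   classifier = TextClassificationTool()
#   label = classifier("This movie was fantastic", ["positive", "negative"])
#   print(label)  # -> the most likely label, e.g. "positive"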
| 120
|
def odd_even_transposition(arr: list) -> list:
    """Odd-even transposition (brick) sort: alternate passes over odd/even indexed pairs."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
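# A quick self-check (a minimal sketch) of the sort above: after the alternating
# odd/even passes, any input ends up in non-decreasing order.
if __name__ == "__main__":
    assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]
    assert odd_even_transposition([]) == []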
| 120
| 1
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 305
|
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
print(f"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(_SCREAMING_SNAKE_CASE ):
print(f"""{i}\t\t{d}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for j in range(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]:
snake_case_ = [float("""inf""" )] * vertex_count
snake_case_ = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
snake_case_ = distance[u] + w
snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip())
__SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip())
__SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight}
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip())
__SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
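# Worked example (hedged: hypothetical input). With V=3, E=3 and the edges
# 0->1 (w=5), 1->2 (w=-2), 0->2 (w=10), bellman_ford(graph, 3, 3, 0) returns
# [0.0, 5.0, 3.0]: the relaxed path 0->1->2 (cost 3) beats the direct edge (cost 10).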
| 347
| 0
|
"""Tokenization classes for CPM, adapted from the XLNet tokenizer with Jieba pre-tokenization."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
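# A decode round-trip note (hedged: illustrative). CPM represents " " as
# "\u2582" and "\n" as "\u2583" inside tokens (see `self.translator` above);
# `_decode` strips the SentencePiece spaces and maps the placeholders back,
# so spaces and newlines in the original text survive an encode -> decode trip.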
| 89
|
"""Rotate an image with affine transforms and plot the results."""
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Build the affine transform mapping `pt1` to `pt2` and apply it to `img`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 89
| 1
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 227
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F"""Loading TF weight {name} with shape {shape}""")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(F"""Importing {name}""")
        if name not in tf_weights:
            logger.info(F"""{name} not in tf pre-trained weights, skipping""")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""")

        logger.info(F"""Initialize PyTorch weight {name} {array.shape}""")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}""")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
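# Worked example of the "SAME" padding above (hedged: hypothetical shapes).
# For in_height=7, stride_height=2, kernel_height=3: 7 % 2 == 1, so
# pad_along_height = max(3 - 1, 0) = 2, split as pad_top=1 / pad_bottom=1 --
# matching TensorFlow's padding="SAME" convention when the leftover is odd.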
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""")
        if out_channels % groups != 0:
            raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
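# A minimal usage sketch (hedged: illustrative shapes only) for the classifier above:
#
#   config = MobileNetV1Config(num_labels=10)
#   model = MobileNetV1ForImageClassification(config)
#   pixel_values = torch.randn(1, 3, 224, 224)
#   logits = model(pixel_values).logits  # -> shape (1, 10)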
| 335
| 0
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find instances where a non-binary file is opened without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        """Find instances where a python script file contains a `print` statement."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''')

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''')
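# A minimal illustration (hedged: hypothetical strings) of what the regexes
# above flag. For the first pattern, ` open(path)` matches, while
# ` open(path, encoding="utf-8")` and ` open(path, "rb")` are excluded by the
# negative lookahead. For the second, a bare `print(x)` is captured in group 1,
# while `# print(x)` is swallowed by an ignored alternative and filtered out
# via the `match.group(1) is not None` check.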
| 333
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 333
| 1
|
"""simple docstring"""
def _snake_case ( lowercase__ : Union[str, Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Any = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
lowerCAmelCase_ :Dict = hex_num[0] == """-"""
if is_negative:
lowerCAmelCase_ :int = hex_num[1:]
try:
lowerCAmelCase_ :Any = int(_lowerCAmelCase , 1_6 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
lowerCAmelCase_ :str = """"""
while int_num > 0:
lowerCAmelCase_ :Optional[int] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
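# A quick self-check (a minimal sketch; the expected values follow from
# 0xAC == 172 == 0b10101100) of the conversion above.
if __name__ == "__main__":
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-9") == -1001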
| 84
|
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations (XOR sum plus shifted carry)."""
    while second != 0:
        carry = first & second  # positions where both bits are 1 generate a carry
        first ^= second  # bitwise sum without the carry
        second = carry << 1  # propagate the carry one position to the left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 116
| 0
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCAmelCase = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowercase ( a__ : List[str]=None ) -> List[Any]:
if subparsers is not None:
_UpperCamelCase = subparsers.add_parser('''tpu-config''' , description=_description )
else:
_UpperCamelCase = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
_UpperCamelCase = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=a__ , default=a__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=a__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=a__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
_UpperCamelCase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=a__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=a__ )
return parser
def tpu_command_launcher ( args ):
defaults = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file ):
defaults = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
args.command_file = defaults.command_file
if not args.command and defaults.commands is not None:
args.command = defaults.commands
if not args.tpu_name:
args.tpu_name = defaults.tpu_name
if not args.tpu_zone:
args.tpu_zone = defaults.tpu_zone
if args.accelerate_version == "dev":
args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
args.accelerate_version = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , Version ):
args.accelerate_version = F'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , list ):
args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
new_cmd = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F'''pip install {args.accelerate_version}''']
new_cmd += args.command
args.command = '''; '''.join(new_cmd )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
cmd = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'''Running {" ".join(cmd )}''' )
return
subprocess.run(cmd )
print('''Successfully setup pod.''' )
def main ( ):
parser = tpu_command_parser()
args = parser.parse_args()
tpu_command_launcher(args )
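For reference, a standalone sketch of what this launcher assembles. The values for `--tpu_name` and `--tpu_zone` below are hypothetical, not taken from any config: the startup commands are joined with '; ' into a single string and shipped to every worker via `gcloud compute tpus tpu-vm ssh`.

# Illustrative only: mirrors the command assembly above with made-up values.
commands = ["cd /usr/share", "pip install accelerate -U", "python train.py"]
command_str = "; ".join(commands)
cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh",
    "my-tpu",                    # hypothetical --tpu_name
    "--zone", "us-central1-b",   # hypothetical --tpu_zone
    "--command", command_str,
    "--worker", "all",           # run on every TPU VM worker
]
print(" ".join(cmd))             # what --debug would print instead of executing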
| 54
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
snake_case__ = PegasusConfig
snake_case__ = {}
snake_case__ = '''gelu'''
def __init__( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[str]=13 , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : int=False , __UpperCamelCase : Optional[Any]=99 , __UpperCamelCase : int=32 , __UpperCamelCase : List[str]=5 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : List[str]=20 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Dict=0 , ) -> str:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def _UpperCamelCase ( self : Tuple ) -> List[str]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _UpperCamelCase ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ) -> str:
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__UpperCamelCase )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ) -> List[str]:
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__UpperCamelCase )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
if attention_mask is None:
attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
snake_case__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
_UpperCamelCase = FlaxPegasusModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _UpperCamelCase ( self : Any ) -> Tuple:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ) -> Dict:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Union[str, Any] ) -> str:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> str:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : Dict , __UpperCamelCase : str=None , **__UpperCamelCase : Dict ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCamelCase ( self : Optional[Any] ) -> Dict:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = model_class(__UpperCamelCase )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__UpperCamelCase )
_UpperCamelCase = np.ones((1, 1) )
_UpperCamelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _UpperCamelCase ( self : str ) -> Any:
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__UpperCamelCase , return_tensors='''np''' , truncation=__UpperCamelCase , max_length=512 , padding=__UpperCamelCase )
_UpperCamelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCamelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded
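A toy demonstration of the mask construction performed by `prepare_pegasus_inputs_dict` earlier in this file, using standard numpy (`np.int8` as the dtype; the arrays below are made up): positions equal to the pad id get mask 0, and the decoder mask always keeps the first (BOS) position.

import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 7, 9, 0, 0]])
attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
print(attention_mask)  # [[1 1 1 0 0]]

decoder_input_ids = np.array([[0, 5, 7, 0]])
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),  # keep BOS slot
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask)  # [[1 1 1 0]]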
| 54
| 1
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def UpperCamelCase_ ( ):
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument("""-f""" )
args = parser.parse_args()
return args.f
class DeeBertTests ( TestCasePlus ):
"""simple docstring"""
def setUp ( self ) -> None:
stream_handler = logging.StreamHandler(sys.stdout )
logger.addHandler(stream_handler )
def run_and_check ( self , args ):
n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(sys , """argv""" , args ):
result = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
def __lowercase ( self : List[str] ) -> Union[str, Any]:
lowerCAmelCase_ : Dict = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(train_args )
lowerCAmelCase_ : Any = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(eval_args )
lowerCAmelCase_ : Any = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(entropy_args )
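A minimal sketch of the `sys.argv` patching trick `run_and_check` relies on above; `main` here is a stand-in for `run_glue_deebert.main()`, not the real script.

import sys
from unittest.mock import patch


def main() -> int:
    # stand-in for run_glue_deebert.main(); just counts its CLI args
    return len(sys.argv) - 1


testargs = ["prog.py", "--model_type", "roberta", "--do_eval"]
with patch.object(sys, "argv", testargs):
    print(main())  # 3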
| 120
|
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__A : Dict = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__A : List[Any] = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__A : str = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
"""simple docstring"""
def __lowercase ( self : str ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def __lowercase ( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[int]=1 , lowerCamelCase : Union[str, Any]="binary" , lowerCamelCase : Any=None , lowerCamelCase : str="warn" , ) -> List[Any]:
lowerCAmelCase_ : Optional[int] = recall_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase , zero_division=lowerCamelCase , )
return {"recall": float(lowerCamelCase ) if score.size == 1 else score}
| 120
| 1
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition ( number_to_partition : int ):
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
ret = set()
prime: int
sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution ( number_unique_partitions : int = 5000 ):
"""simple docstring"""
for number_to_partition in range(1 ,NUM_PRIMES ):
if len(partition(number_to_partition ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 329
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule ( scheduler , num_steps=10 ):
"""simple docstring"""
lrs = []
for _ in range(num_steps ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def unwrap_and_save_reload_schedule ( scheduler , num_steps=10 ):
"""simple docstring"""
lrs = []
for step in range(num_steps ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname ,"schedule.bin" )
torch.save(scheduler.state_dict() ,file_name )
state_dict = torch.load(file_name )
scheduler.load_state_dict(state_dict )
return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Any = 1_0
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ = data
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper :
"""simple docstring"""
def __init__( self , fn ):
self.fn = fn
def __call__( self , *args , **kwargs ):
return self.fn(*args , **kwargs )
@classmethod
def wrap_scheduler ( cls , scheduler ):
scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
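A standalone sketch of the pattern `LambdaScheduleWrapper` implements: PyTorch's `LambdaLR` includes `lr_lambdas` in its `state_dict` only when they are callable objects rather than plain functions or lambdas, so wrapping each schedule function in a small callable class lets the schedule survive a save/reload round trip. All names below are illustrative.

import torch
from torch.optim.lr_scheduler import LambdaLR


def halve_every_step(step: int) -> float:
    return 0.5**step


class PicklableFn:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)


model = torch.nn.Linear(2, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = LambdaLR(optimizer, lr_lambda=halve_every_step)
scheduler.lr_lambdas = [PicklableFn(f) for f in scheduler.lr_lambdas]

state = scheduler.state_dict()  # now carries the schedule function's state
fresh = LambdaLR(optimizer, lr_lambda=halve_every_step)
fresh.load_state_dict(state)    # restores step count and lambda state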
| 329
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = 'codegen'
lowerCAmelCase : Optional[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] ,_UpperCAmelCase : List[Any]=50400 ,_UpperCAmelCase : Dict=2048 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Union[str, Any]=4096 ,_UpperCAmelCase : List[Any]=28 ,_UpperCAmelCase : int=16 ,_UpperCAmelCase : Optional[Any]=64 ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : List[str]="gelu_new" ,_UpperCAmelCase : Union[str, Any]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Optional[int]=1E-5 ,_UpperCAmelCase : str=0.02 ,_UpperCAmelCase : List[Any]=True ,_UpperCAmelCase : Any=50256 ,_UpperCAmelCase : int=50256 ,_UpperCAmelCase : Any=False ,**_UpperCAmelCase : List[str] ,):
_a : Optional[Any] = vocab_size
_a : Optional[int] = n_ctx
_a : Dict = n_positions
_a : int = n_embd
_a : List[Any] = n_layer
_a : Dict = n_head
_a : Optional[int] = n_inner
_a : Optional[Any] = rotary_dim
_a : List[str] = activation_function
_a : List[str] = resid_pdrop
_a : List[str] = embd_pdrop
_a : Union[str, Any] = attn_pdrop
_a : List[str] = layer_norm_epsilon
_a : Any = initializer_range
_a : Any = use_cache
_a : Any = bos_token_id
_a : Dict = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ,tie_word_embeddings=_UpperCAmelCase ,**_UpperCAmelCase )
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : List[str] ,_UpperCAmelCase : PretrainedConfig ,_UpperCAmelCase : str = "default" ,_UpperCAmelCase : List[PatchingSpec] = None ,_UpperCAmelCase : bool = False ,):
super().__init__(_UpperCAmelCase ,task=_UpperCAmelCase ,patching_specs=_UpperCAmelCase ,use_past=_UpperCAmelCase )
if not getattr(self._config ,'pad_token_id' ,_UpperCAmelCase ):
# TODO: how to do that better?
_a : Dict = 0
@property
def __lowercase ( self : Tuple ):
_a : List[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase ,direction='inputs' )
_a : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_a : Any = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __lowercase ( self : Dict ):
return self._config.n_layer
@property
def __lowercase ( self : Optional[int] ):
return self._config.n_head
def __lowercase ( self : int ,_UpperCAmelCase : PreTrainedTokenizer ,_UpperCAmelCase : int = -1 ,_UpperCAmelCase : int = -1 ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : Optional[TensorType] = None ,):
_a : List[str] = super(_UpperCAmelCase ,self ).generate_dummy_inputs(
_UpperCAmelCase ,batch_size=_UpperCAmelCase ,seq_length=_UpperCAmelCase ,is_pair=_UpperCAmelCase ,framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
_a : List[Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_a , _a : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_a : Optional[int] = seqlen + 2
_a : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a : Any = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
_a : Optional[Any] = common_inputs['attention_mask']
if self.use_past:
_a : str = ordered_inputs['attention_mask'].dtype
_a : Dict = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_UpperCAmelCase ,_UpperCAmelCase ,dtype=_UpperCAmelCase )] ,dim=1 )
return ordered_inputs
@property
def __lowercase ( self : Dict ):
return 13
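An illustrative computation, with made-up toy sizes rather than the real CodeGen config, of the dummy `past_key_values` shape built in `generate_dummy_inputs` above: one `(key, value)` pair of zeros per layer, with past length `seqlen + 2`.

import torch

batch, seqlen = 2, 5
num_attention_heads, hidden_size, num_layers = 4, 32, 3

past_shape = (
    batch,
    num_attention_heads,
    seqlen + 2,  # past length used above
    hidden_size // num_attention_heads,
)
past_key_values = [
    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 4, 7, 8])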
| 89
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
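A standalone sketch of the trick these tests rely on: monkey-patching `socket.socket` so that any attempt to open a connection raises, which emulates being offline without touching the host network. The error message mirrors the one used above; note that DNS resolution may still happen before the patched constructor is hit.

import socket


def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")


socket.socket = offline_socket

try:
    socket.create_connection(("huggingface.co", 443), timeout=2)
except RuntimeError as err:
    print(f"network blocked as expected: {err}")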
| 89
| 1
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A:
'''simple docstring'''
def __init__( self : Dict , A_ : Dict , A_ : Optional[Any]=13 , A_ : List[Any]=7 , A_ : Any=True , A_ : str=True , A_ : Optional[int]=True , A_ : Optional[int]=True , A_ : Union[str, Any]=99 , A_ : Any=64 , A_ : Any=32 , A_ : List[str]=5 , A_ : List[str]=4 , A_ : Dict=37 , A_ : Union[str, Any]="gelu" , A_ : Optional[Any]=0.1 , A_ : List[Any]=0.1 , A_ : Any=512 , A_ : List[Any]=16 , A_ : Any=2 , A_ : Any=0.02 , A_ : Optional[Any]=3 , A_ : int=4 , A_ : List[str]=None , ) -> int:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = embedding_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def a__ ( self : int , A_ : Tuple , A_ : Optional[Any] , A_ : List[str] , A_ : Union[str, Any] , A_ : str , A_ : Optional[Any] , A_ : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = MobileBertModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : Dict , A_ : Optional[int] , A_ : Dict , A_ : Optional[Any] , A_ : Union[str, Any] , A_ : str , A_ : int , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = MobileBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Dict , A_ : List[str] , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int , A_ : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = MobileBertForNextSentencePrediction(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a__ ( self : int , A_ : int , A_ : List[str] , A_ : Optional[Any] , A_ : Any , A_ : str , A_ : Optional[Any] , A_ : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ = MobileBertForPreTraining(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a__ ( self : Any , A_ : int , A_ : List[Any] , A_ : Union[str, Any] , A_ : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = MobileBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Tuple , A_ : Optional[Any] , A_ : str , A_ : List[Any] , A_ : Any , A_ : Union[str, Any] , A_ : Optional[int] , A_ : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MobileBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Union[str, Any] , A_ : Tuple , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : Any , A_ : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MobileBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Tuple , A_ : int , A_ : int , A_ : Dict , A_ : Dict , A_ : Optional[Any] , A_ : Dict , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.num_choices
lowerCamelCase_ = MobileBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
def a__ ( self : Optional[Any] , A_ : str , A_ : Any , A_ : Tuple=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class in get_values(A_ ):
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def a__ ( self : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = MobileBertModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , hidden_size=37 )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*A_ )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*A_ )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*A_ )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*A_ )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*A_ )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*A_ )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*A_ )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*A_ )
def _long_tensor ( lowercase : Union[str, Any] ):
'''simple docstring'''
return torch.tensor(
lowercase , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased').to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
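# A minimal, hypothetical sketch (not part of the original test) of the
# ratio-based comparison above, on standalone tensors: values near 1e8 make
# an additive `abs(a - b) < atol` check meaningless, so the ratio
# expected / actual is bounded around 1 instead.
def _ratio_check_demo() -> bool:
    expected = torch.tensor([1.0e8, 2.5e0, -3.0e-1])
    actual = expected * (1 + 1e-5)  # simulate a tiny relative error
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE))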
| 368
|
from ..utils import DummyObject, requires_backends
class A( metaclass=UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''keras_nlp''']
def __init__( self : Optional[int] , *A_ : Any , **A_ : Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['keras_nlp'] )
| 208
| 0
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A_ (TestCase ):
'''simple docstring'''
def _no_encoding_on_file_open(self , lowercase__ ) -> Optional[int]:
with open(lowercase__ , encoding='''utf-8''' ) as input_file:
__UpperCAmelCase = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
__UpperCAmelCase = input_file.read()
__UpperCAmelCase = regexp.search(lowercase__ )
return match
def _no_print_statements(self , lowercase__ ) -> Union[str, Any]:
with open(lowercase__ , encoding='''utf-8''' ) as input_file:
__UpperCAmelCase = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
__UpperCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__UpperCAmelCase = regexp.finditer(lowercase__ )
__UpperCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def test_no_encoding_on_file_open(self ) -> List[str]:
__UpperCAmelCase = Path('''./datasets''' )
__UpperCAmelCase = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowercase__ ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def test_no_print_statements(self ) -> str:
__UpperCAmelCase = Path('''./datasets''' )
__UpperCAmelCase = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowercase__ ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
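# A hypothetical standalone illustration (not in the original file) of the
# `re.finditer` trick used above: `re.search` can stop on an alternative that
# is meant to be ignored (e.g. a commented-out print), so every match is
# scanned and only those where the capturing group actually fired are kept.
def _finditer_demo():
    regexp = re.compile(r"#[^\r\n]*print\(|(print\()")
    source = "# print(debug)\nprint('hello')\n"
    matches = [m for m in regexp.finditer(source) if m.group(1) is not None]
    return matches[0].start() if matches else None  # 15, the real print(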
| 333
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1_000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act='gelu', num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, )
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
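# A small, hypothetical helper (not from the original file) mirroring the
# shape check in `check_hidden_states_output` above: stage outputs start at a
# spatial size of image_size // 4 and halve after every two blocks, so the
# expected hidden-state shapes can be derived without running the model.
def _expected_swiftformer_shapes(batch_size, embed_dims, image_size, num_states=8):
    return [
        (
            batch_size,
            embed_dims[i // 2],
            (image_size // 4) // 2 ** (i // 2),
            (image_size // 4) // 2 ** (i // 2),
        )
        for i in range(num_states)
    ]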
| 333
| 1
|
from ..utils import DummyObject, requires_backends
class __A( metaclass=a ):
snake_case_ = ['''keras_nlp''']
def __init__( self , *_snake_case , **_snake_case ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''keras_nlp'''] )
| 33
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A( a ):
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__a = bertabert.config.encoder.vocab_size
__a = tokenizer.sep_token_id
__a = tokenizer.cls_token_id
__a = 128
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__a = train_dataset.select(range(32 ) )
__a = val_dataset.select(range(16 ) )
__a = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__a = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
__a = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
__a = inputs.input_ids
__a = inputs.attention_mask
__a = outputs.input_ids
__a = outputs.input_ids.copy()
__a = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__a = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
__a = pred.label_ids
__a = pred.predictions
# all unnecessary tokens are removed
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
__a = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__a = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__a = self.get_auto_remove_tmp_dir()
__a = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__a = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 33
| 1
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
a__ : Dict = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    """simple docstring"""
    def __init__(self, length: int = 101) -> None:
        self.length = length
    def __len__(self):
        return self.length
    def __getitem__(self, i) -> int:
        return i
class DummyDataCollator:
    """simple docstring"""
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    """simple docstring"""
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)
    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    """simple docstring"""
    @require_torch_neuroncore
    def test_trainer(self):
        """simple docstring"""
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    """simple docstring"""
    @require_torch_multi_gpu
    def test_trainer(self):
        """simple docstring"""
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
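# A hypothetical helper (not in the original file) that makes the ordering
# property checked in `compute_metrics` below explicit: gathered predictions
# over DummyDataset must be exactly 0..len(dataset)-1, in order.
def _is_sequential(predictions, num_samples) -> bool:
    # e.g. _is_sequential([0, 1, 2], 3) -> True
    return list(predictions) == list(range(num_samples))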
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}")
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 54
|
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
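# A tiny, illustrative usage (not in the original file): slowsort recursively
# sorts both halves, moves the larger of the two "maxima candidates" to the
# end, then slow-sorts everything but that final element - all in place.
def _slowsort_demo() -> list[int]:
    data = [5, 2, 4, 1, 3]
    slowsort(data)
    return data  # [1, 2, 3, 4, 5]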
if __name__ == "__main__":
from doctest import testmod
testmod()
| 54
| 1
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
@require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowerCAmelCase : int = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=lowerCamelCase__ , )
| 357
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'crop_size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 0
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=1_0_0)
def partition(number_to_partition: int) -> set[int]:
    '''simple docstring'''
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5_0_0_0) -> int | None:
    '''simple docstring'''
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
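# A hypothetical worked example (not in the original file): the prime
# partitions of 10 are 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7 and 5+5, so
# `partition(10)` yields the corresponding products.
def _partition_demo() -> set[int]:
    return partition(10)  # {32, 36, 30, 21, 25}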
if __name__ == "__main__":
print(f'''{solution() = }''')
| 329
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ :Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 329
| 1
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"""determinant modular {req_l} of encryption key({det}) """
                f"""is not co prime w.r.t {req_l}.\nTry another key."""
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
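# Hypothetical non-interactive usage (the original file only exposes the
# interactive `main` below): the 2x2 key [[2, 5], [1, 6]] has determinant 7,
# which is coprime with 36, so decrypting recovers the padded, uppercased text.
def _hill_demo() -> tuple[str, str]:
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    encrypted = hc.encrypt("testmessage")
    return encrypted, hc.decrypt(encrypted)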
def main() -> None:
    '''simple docstring'''
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 359
|
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    '''simple docstring'''
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
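# A hypothetical worked example (not in the original file): merging [1, 3]
# and [2, 4] gives [1, 2, 3, 4]; the even length takes the mean of the two
# middle values, (2 + 3) / 2 = 2.5.
def _median_demo() -> float:
    return median_of_two_arrays([1, 3], [2, 4])  # 2.5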
if __name__ == "__main__":
import doctest
doctest.testmod()
array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''')
| 284
| 0
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : List[Any] = "Hello world! cécé herlolip"
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = FairseqRobertaModel.from_pretrained(_lowerCAmelCase )
roberta.eval() # disable dropout
snake_case_ = roberta.model.encoder.sentence_encoder
snake_case_ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
snake_case_ = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print("Our RoBERTa config:" , _lowerCAmelCase )
snake_case_ = XLMRobertaXLForSequenceClassification(_lowerCAmelCase ) if classification_head else XLMRobertaXLForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ = roberta_sent_encoder.embed_tokens.weight
snake_case_ = roberta_sent_encoder.embed_positions.weight
snake_case_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ = roberta_sent_encoder.layer_norm.weight
snake_case_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ = model.roberta.encoder.layer[i]
snake_case_ = roberta_sent_encoder.layers[i]
snake_case_ = layer.attention
snake_case_ = roberta_layer.self_attn_layer_norm.weight
snake_case_ = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ = roberta_layer.self_attn.q_proj.weight
snake_case_ = roberta_layer.self_attn.q_proj.bias
snake_case_ = roberta_layer.self_attn.k_proj.weight
snake_case_ = roberta_layer.self_attn.k_proj.bias
snake_case_ = roberta_layer.self_attn.v_proj.weight
snake_case_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ = roberta_layer.self_attn.out_proj.weight
snake_case_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ = roberta_layer.final_layer_norm.weight
snake_case_ = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case_ = roberta_layer.fca.weight
snake_case_ = roberta_layer.fca.bias
# output
snake_case_ = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case_ = roberta_layer.fca.weight
snake_case_ = roberta_layer.fca.bias
# end of layer
if classification_head:
snake_case_ = roberta.model.classification_heads['mnli'].dense.weight
snake_case_ = roberta.model.classification_heads['mnli'].dense.bias
snake_case_ = roberta.model.classification_heads['mnli'].out_proj.weight
snake_case_ = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
snake_case_ = roberta.model.encoder.lm_head.dense.weight
snake_case_ = roberta.model.encoder.lm_head.dense.bias
snake_case_ = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ = roberta.model.encoder.lm_head.weight
snake_case_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ = roberta.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
snake_case_ = model(_lowerCAmelCase )[0]
if classification_head:
snake_case_ = roberta.model.classification_heads['mnli'](roberta.extract_features(_lowerCAmelCase ) )
else:
snake_case_ = roberta.model(_lowerCAmelCase )[0]
print(our_output.shape , their_output.shape )
snake_case_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
snake_case_ = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowercase__ : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 187
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 208
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    '''simple docstring'''
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
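# A hypothetical worked example (not in the original file): a 5x5 square with
# a centred 3x3 hole uses 5*5 - 3*3 = 16 tiles, so this (outer, hole) pair
# contributes one laminae arrangement to the count for t = 16.
def _tiles_used(outer_width: int, hole_width: int) -> int:
    return outer_width * outer_width - hole_width * hole_width  # e.g. 16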
if __name__ == "__main__":
print(f'{solution() = }')
| 362
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['''gpt2''']
TINY_MODEL_CHECKPOINT = '''gpt2'''
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            """simple docstring"""
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="""text"""),))
        def serving(self, text):
            """simple docstring"""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["""input_ids"""].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase_ ( unittest.TestCase ):
    def setUp(self):
        """simple docstring"""
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCAmelCase = tokenizer([test_inputs] , return_tensors="""tf""" )
UpperCAmelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCAmelCase = python_outputs[key].numpy()
UpperCAmelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case__ , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase = tf.function(snake_case__ )
for test_inputs in self.test_sentences:
UpperCAmelCase = tf.constant(snake_case__ )
UpperCAmelCase = compiled_tokenizer(snake_case__ )
UpperCAmelCase = tf_tokenizer(snake_case__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase = ModelToSave(tokenizer=snake_case__ )
UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase = model.serving(snake_case__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase = Path(snake_case__ ) / """saved.model"""
tf.saved_model.save(snake_case__ , snake_case__ , signatures={"""serving_default""": model.serving} )
UpperCAmelCase = tf.saved_model.load(snake_case__ )
UpperCAmelCase = loaded_model.signatures["""serving_default"""](snake_case__ )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase = tf_tokenizer(snake_case__ ) # Build model with some sample inputs
UpperCAmelCase = tf_tokenizer.get_config()
UpperCAmelCase = TFGPTaTokenizer.from_config(snake_case__ )
UpperCAmelCase = model_from_config(snake_case__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
    # for the test to run
    tf_tokenizer.pad_token_id = 12_31_23
    for max_length in [3, 5, 10_24]:
        test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
        out = tf_tokenizer(test_inputs, max_length=max_length)
        out_length = out["""input_ids"""].numpy().shape[1]
        assert out_length == max_length
| 248
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 33
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 1
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n)
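# A hypothetical worked example (not in the original file): F(7) = 13 is the
# first Fibonacci term with two digits, so the index returned for n = 2 is 7.
def _fibonacci_demo() -> int:
    return fibonacci_digits_index(2)  # 7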
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 361
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
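# A hypothetical usage note (not in the original file): with no gates applied
# before the measurement, the qubit stays in |0>, so every shot should land
# on the '0' outcome.
def _measure_demo():
    return single_qubit_measure(1, 1)  # expected roughly {'0': 1000}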
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 264
| 0
|
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    '''simple docstring'''
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    '''simple docstring'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    '''simple docstring'''
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    '''simple docstring'''
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
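# Minimal usage sketch (hypothetical parameter values): one car every 5 cells at
# speed 2 on a 100-cell ring, 10 update steps, 10% random-slowdown probability.
example_highway = construct_highway(100, frequency=5, initial_speed=2)
history = simulate(example_highway, number_of_update=10, probability=0.1, max_speed=5)
print(len(history), "rows: the initial highway plus one row per time step")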
| 43
|
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build streaming (iterable) dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
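# Minimal usage sketch (hypothetical local file): this reader is what backs
# `load_dataset("text", data_files=...)`; each line of the file becomes one example.
# dataset = TextDatasetReader("corpus.txt", keep_in_memory=True).read()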
| 323
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
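# Minimal usage sketch: the defaults above match the gym-hopper checkpoints
# (17-dim state, 4-dim action), and attribute_map lets GPT-2-style names resolve.
# config = DecisionTransformerConfig()
# config.num_hidden_layers  # resolves to config.n_layer == 3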
| 354
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed v_rms = sqrt(3RT / M), with M in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    # run doctest
    import doctest

    doctest.testmod()
    # example: nitrogen (N2) has a molar mass of 0.028 kg/mol (not 28, which is g/mol)
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")  # ~517 m/s
| 318
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of sub-word tokens into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
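# Minimal usage sketch (the `from_pretrained` entry point comes from the
# PreTrainedTokenizer base class and downloads the vocab/spm files listed above):
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# encoded = tokenizer("Hello world", return_tensors="pt")  # prefixed with the __en__ language token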
| 68
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR the ciphertext with the repeating key; return None on any invalid char."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep the decodings that survive."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 284
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 368
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 101
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence via a binary include/exclude state-space tree."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # branch 1: exclude the current element
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include the current element
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 28
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """Pad the bottom/right symmetrically so height and width become multiples of `size`."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="""symmetric""", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
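# Minimal usage sketch (illustrative shapes): a 250x301 image is padded up to
# 256x304 so both sides are multiples of the default pad_size of 8.
# processor = Swin2SRImageProcessor()
# batch = processor(images=some_pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # e.g. (1, 3, 256, 304)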
| 248
| 0
|
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """Count numbers below `limit` expressible as prime**2 + prime**3 + prime**4."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24)
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f'{solution() = }')
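# Sanity check sketch: below 50 only 28, 33, 47 and 49 are expressible as
# p**2 + q**3 + r**4 with p, q, r prime, so solution(50) should return 4.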
| 164
|
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
| 164
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 26
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('''on_init_end''')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('''on_train_begin''')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('''on_train_end''')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('''on_epoch_begin''')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('''on_epoch_end''')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('''on_step_begin''')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('''on_step_end''')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('''on_evaluate''')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('''on_predict''')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('''on_save''')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('''on_log''')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('''on_prediction_step''')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ['''on_init_end''', '''on_train_begin''']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader()) + ['''on_log''', '''on_evaluate''']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('''on_epoch_begin''')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('''on_log''')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('''on_save''')
            expected_events.append('''on_epoch_end''')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='''ignore''', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='''steps''')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='''epoch''')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='''steps''', )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('''transformers.trainer_callback.logger.warning''') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 264
| 0
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """LSD radix sort for non-negative integers; sorts in place and returns the list."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
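# Quick usage sketch: the sort is in place and only valid for non-negative integers.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]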
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature: reads image data from a path or bytes and decodes it to PIL."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array as bytes."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes using its native compression if possible, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into the {"bytes", "path"} dict format used by the Image feature."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
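# Minimal usage sketch (hypothetical file path): a dataset column typed as Image()
# stores {"bytes", "path"} structs and decodes lazily to PIL images.
# feature = Image()
# encoded = feature.encode_example("cat.png")   # {"path": "cat.png", "bytes": None}
# pil_img = feature.decode_example(encoded)     # requires the file to exist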
| 31
| 0
|
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluate one generated program in a separate process, with a hard timeout."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("""timed out""")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("""passed""")
        except TimeoutException:
            result.append("""timed out""")
        except BaseException as e:
            result.append(f"""failed: {e}""")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("""Timed out!""")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive functions so the executed program cannot interfere with
    the test host (fork bombs, killing processes, removing files, ...). Note this
    is NOT a security sandbox."""
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = """1"""

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
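# Minimal usage sketch: run one untrusted program with a 3-second budget. The
# task_id / completion_id values are hypothetical bookkeeping labels.
# outcome = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
# outcome["passed"]  # -> True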
| 84
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (a.k.a. swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
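# Quick numeric sanity check: sigmoid(0) == 0.5 and the SiLU is x * sigmoid(x),
# so sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])) ~ [-0.269, 0.0, 0.731].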
| 318
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = """nat"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
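# Minimal usage sketch: the derived attributes follow from the constructor above.
# config = NatConfig()    # embed_dim=64, depths=[3, 4, 6, 5]
# config.hidden_size      # 64 * 2**3 == 512, the channel dim after the last stage
# config.stage_names      # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']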
| 55
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # Accept either a single scalar threshold (applied to every layer)
        # or a per-layer list of thresholds.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        # Copy the main pooler's weights into every highway pooler.
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
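# A toy sketch (names illustrative, independent of BERT) of the control-flow
# idiom used in DeeBertEncoder.forward above: a sufficiently confident
# intermediate "layer" raises an exception to skip the remaining layers,
# and the caller recovers the partial result from the exception payload.
class _ExitSignal(Exception):
    def __init__(self, payload, exit_layer):
        self.payload = payload
        self.exit_layer = exit_layer


def _run_layers_with_early_exit(x, thresholds):
    for i, threshold in enumerate(thresholds):
        x = x + 1  # stand-in for a real layer
        entropy_proxy = 1.0 / x  # stand-in for the entropy criterion
        if entropy_proxy < threshold:
            raise _ExitSignal(x, i + 1)
    return x


try:
    _result = _run_layers_with_early_exit(0, thresholds=[0.0, 0.0, 0.5])
except _ExitSignal as signal:
    _result = signal.payload  # exited early at layer `signal.exit_layer`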
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    # A shortcut "highway" head: pools an intermediate hidden state and
    # classifies it, mirroring BertPooler + the final classification layer.
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
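# Minimal usage sketch for the classes above (an illustration, not from the
# original file): a stock BertConfig is assumed, and the entropy threshold
# of 0.5 is arbitrary. Early exit is only active in eval mode.
def _deebert_usage_sketch():
    from transformers import BertConfig

    config = BertConfig(num_labels=2)
    model = DeeBertForSequenceClassification(config)
    model.eval()
    model.bert.encoder.set_early_exit_entropy(0.5)

    input_ids = torch.tensor([[101, 7592, 2088, 102]])  # toy token ids
    outputs = model(input_ids=input_ids)
    logits, exit_layer = outputs[0], outputs[-1]
    return logits, exit_layer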
| 55
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    __lowerCamelCase = StableDiffusionInstructPix2PixPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
        _lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_lowerCAmelCase = CLIPTextModel(_snake_case )
_lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase = Image.fromarray(np.uint8(_snake_case ) ).convert("""RGB""" )
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = sd_pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """french fries"""
_lowerCAmelCase = sd_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = [inputs["""prompt"""]] * 2
        _lowerCAmelCase = np.array(inputs["""image"""] ).astype(np.float32 ) / 255.0
_lowerCAmelCase = torch.from_numpy(_snake_case ).unsqueeze(0 ).to(_snake_case )
_lowerCAmelCase = image / 2 + 0.5
_lowerCAmelCase = image.permute(0 , 3 , 1 , 2 )
_lowerCAmelCase = image.repeat(2 , 1 , 1 , 1 )
_lowerCAmelCase = sd_pipe(**_snake_case ).images
_lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_lowerCAmelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = sd_pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline(**_snake_case )
_lowerCAmelCase = VaeImageProcessor(do_resize=_snake_case , do_normalize=_snake_case )
_lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(_snake_case , input_image_type="""pt""" ) )[0]
_lowerCAmelCase = components["""vae"""]
_lowerCAmelCase = self.get_dummy_inputs_by_type(_snake_case , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
_lowerCAmelCase = pipe(**_snake_case )[0]
_lowerCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(_snake_case , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.manual_seed(_snake_case )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
_lowerCAmelCase = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case )
_lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = 0
def callback_fn(_snake_case , _snake_case , _snake_case ) -> None:
_lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase = latents[0, -3:, -3:, -1]
_lowerCAmelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase = latents[0, -3:, -3:, -1]
_lowerCAmelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_lowerCAmelCase = False
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=_snake_case , torch_dtype=torch.float16 )
_lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
pipe(**_snake_case , callback=_snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=_snake_case , torch_dtype=torch.float16 )
_lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case )
_lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCAmelCase = inputs["""image"""].resize((504, 504) )
_lowerCAmelCase = """timbrooks/instruct-pix2pix"""
        _lowerCAmelCase = StableDiffusionInstructPix2PixPipeline.from_pretrained(
_snake_case , safety_checker=_snake_case , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = pipe(**_snake_case )
_lowerCAmelCase = output.images[0]
_lowerCAmelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
_lowerCAmelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
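# For reference, the minimal end-to-end recipe the slow tests above exercise
# (a sketch: the prompt, step count and guidance values are arbitrary, and a
# CUDA device is assumed).
def _instruct_pix2pix_usage_sketch():
    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    edited = pipe(
        "turn him into a cyborg",
        image=image,
        num_inference_steps=10,
        image_guidance_scale=1.0,
    ).images[0]
    return edited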
| 82
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ :List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : str =XLNetTokenizer
lowercase_ : Dict =XLNetTokenizerFast
lowercase_ : str =True
lowercase_ : str =True
def A__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = XLNetTokenizer(A__ ,keep_accents=A__)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
lowercase = '''<s>'''
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__) ,A__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__) ,A__)
def A__ ( self):
lowercase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'''<unk>''')
self.assertEqual(vocab_keys[1] ,'''<s>''')
self.assertEqual(vocab_keys[-1] ,'''<eod>''')
self.assertEqual(len(A__) ,1_0_0_6)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0)
def A__ ( self):
lowercase = XLNetTokenizer(A__ ,keep_accents=A__)
lowercase = tokenizer.tokenize('''This is a test''')
self.assertListEqual(A__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])
lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
A__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
lowercase = tokenizer.convert_tokens_to_ids(A__)
self.assertListEqual(A__ ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])
lowercase = tokenizer.convert_ids_to_tokens(A__)
self.assertListEqual(
A__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def A__ ( self):
lowercase = XLNetTokenizer(A__ ,do_lower_case=A__)
lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
A__ ,[
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''▁he''', '''ll''', '''o'''])
def A__ ( self):
lowercase = XLNetTokenizer(A__ ,do_lower_case=A__)
lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
A__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
@slow
def A__ ( self):
lowercase = XLNetTokenizer.from_pretrained('''xlnet-base-cased''')
lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=A__)
lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=A__)
lowercase = tokenizer.build_inputs_with_special_tokens(A__)
lowercase = tokenizer.build_inputs_with_special_tokens(A__ ,A__)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def A__ ( self):
# fmt: off
lowercase = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ ,model_name='''xlnet-base-cased''' ,revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' ,)
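# A sketch of the special-token layout the assertions above check: XLNet
# appends <sep> (id 4) and <cls> (id 3) at the END of the sequence, unlike
# BERT, which prepends [CLS].
def _xlnet_special_token_layout(ids_a, ids_b=None, sep=4, cls=3):
    if ids_b is None:
        return ids_a + [sep, cls]
    return ids_a + [sep] + ids_b + [sep, cls]


assert _xlnet_special_token_layout([10, 11]) == [10, 11, 4, 3]
assert _xlnet_special_token_layout([10, 11], [12]) == [10, 11, 4, 12, 4, 3]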
| 101
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__A = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
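# The class above follows a common deprecation-shim pattern; a generic,
# self-contained sketch of the same idea (the names here are hypothetical):
class _NewProcessor:
    def __init__(self, size=32):
        self.size = size


class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)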
| 108
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = PriorTransformer
SCREAMING_SNAKE_CASE_ : List[str] = """hidden_states"""
@property
def lowercase_ ( self : Dict)-> str:
'''simple docstring'''
__lowerCAmelCase: str = 4
__lowerCAmelCase: int = 8
__lowerCAmelCase: int = 7
__lowerCAmelCase: str = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: Any = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str=0)-> str:
'''simple docstring'''
torch.manual_seed(UpperCamelCase__)
__lowerCAmelCase: List[Any] = 4
__lowerCAmelCase: Dict = 8
__lowerCAmelCase: int = 7
__lowerCAmelCase: List[str] = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: Tuple = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowercase_ ( self : Dict)-> List[Any]:
'''simple docstring'''
return (4, 8)
@property
def lowercase_ ( self : Optional[int])-> int:
'''simple docstring'''
return (4, 8)
def lowercase_ ( self : Optional[int])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: str = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
__lowerCAmelCase: Any = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        # Method name is reconstructed; unittest requires the `test_` prefix.
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
def lowercase_ ( self : Optional[int])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
__lowerCAmelCase: Union[str, Any] = model.to(UpperCamelCase__)
if hasattr(UpperCamelCase__ , "set_default_attn_processor"):
model.set_default_attn_processor()
__lowerCAmelCase: str = self.get_dummy_seed_input()
with torch.no_grad():
__lowerCAmelCase: Dict = model(**UpperCamelCase__)[0]
__lowerCAmelCase: Dict = output[0, :5].flatten().cpu()
print(UpperCamelCase__)
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowerCAmelCase: List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2))
@slow
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : int , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : int=7_7 , UpperCamelCase__ : Any=0)-> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(UpperCamelCase__)
__lowerCAmelCase: List[Any] = batch_size
__lowerCAmelCase: Any = embedding_dim
__lowerCAmelCase: Dict = num_embeddings
__lowerCAmelCase: Dict = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: str = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: int = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self : List[Any])-> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
])
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior")
model.to(UpperCamelCase__)
__lowerCAmelCase: Dict = self.get_dummy_seed_input(seed=UpperCamelCase__)
with torch.no_grad():
__lowerCAmelCase: Optional[Any] = model(**UpperCamelCase__)[0]
assert list(sample.shape) == [1, 7_6_8]
__lowerCAmelCase: Dict = sample[0, :8].flatten().cpu()
print(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = torch.tensor(UpperCamelCase__)
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3)
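# Standalone sketch (mirroring the dummy init dict above): construct the
# small prior and run one forward pass with random inputs of the same shapes.
def _prior_forward_sketch():
    model = PriorTransformer(
        num_attention_heads=2,
        attention_head_dim=4,
        num_layers=2,
        embedding_dim=8,
        num_embeddings=7,
        additional_embeddings=4,
    )
    sample = model(
        hidden_states=torch.randn(4, 8),
        timestep=2,
        proj_embedding=torch.randn(4, 8),
        encoder_hidden_states=torch.randn(4, 7, 8),
    )[0]
    assert sample.shape == (4, 8)
    return sample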
| 108
| 1
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__A = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
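# A self-contained illustration (not part of the original script) of the
# multi-line `getattr` case the regex in `check_attribute_being_used`
# handles: the attribute name may sit on a different line than `getattr(`.
def _illustrate_multiline_getattr_match():
    snippet = 'value = getattr(\n    self.config,\n    "hidden_size",\n)'
    pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
    assert re.search(pattern, snippet) is not None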
if __name__ == "__main__":
check_config_attributes()
| 164
|
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A :
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=30 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=10 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=None , lowerCamelCase__=2 , ) -> Optional[int]:
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 2
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
lowercase__ = DeiTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
lowercase__ = DeiTForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = DeiTForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
lowercase__ = self.type_sequence_label_size
lowercase__ = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : int = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase : Tuple = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase : Any = False
lowerCamelCase : str = False
lowerCamelCase : str = False
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = DeiTModelTester(self )
lowercase__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase__ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Dict:
'''simple docstring'''
lowercase__ = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ ( self ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
lowercase__ = model(**lowerCamelCase__ ).loss
loss.backward()
def A__ ( self ) -> int:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase__ = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
lowercase__ = model(**lowerCamelCase__ ).loss
loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
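

# Illustrative usage sketch (not one of the tests above): the same inference
# pattern the integration test exercises, runnable standalone. The checkpoint
# and fixture path are taken from the tests; model and processor classes are
# assumed imported in the (truncated) file header.
if __name__ == "__main__":
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, 1000)
    print("predicted class:", model.config.id2label[int(logits.argmax(-1))])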
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
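    # Example (illustrative): with token_ids_0=[5, 6] and token_ids_1=[7], the
    # result is [CLS] 5 6 [SEP] 7 [SEP], i.e. [cls_id, 5, 6, sep_id, 7, sep_id].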
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
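    # Example (illustrative): a sentence pair yields 0s for "[CLS] A [SEP]" and
    # 1s for "B [SEP]", e.g. [0, 0, 0, 1, 1] for one-token segments A and B.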
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
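

# Usage sketch (illustrative, not part of the module): round-trip a sentence
# pair through the fast tokenizer defined above.
if __name__ == "__main__":
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoding = tokenizer("hello world", "how are you")
    print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))
    # ['[CLS]', 'hello', 'world', '[SEP]', 'how', 'are', 'you', '[SEP]']
    print(encoding["token_type_ids"])  # [0, 0, 0, 0, 1, 1, 1, 1]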
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
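

# Usage sketch (illustrative, not part of the tests): the minimal img2img call
# pattern the integration tests exercise, using the same public checkpoint.
if __name__ == "__main__":
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))
    pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
    pipe.to("cuda")
    result = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        generator=torch.manual_seed(0),
        output_type="np",
    )
    print(result.images[0].shape)  # (512, 768, 3)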
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable):
    # Wrap ``fn`` so that every call first emits an "experimental API" warning.
    # The warning category was elided in the obfuscated source; UserWarning is
    # an assumption here.
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
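

# Usage sketch (illustrative): applying the decorator above.
if __name__ == "__main__":
    @experimental
    def shiny_new_feature():
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        shiny_new_feature()
    print(caught[0].message)  # 'shiny_new_feature' is experimental and ...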
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name,
                        additional_special_tokens=added_tokens,
                        **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # decoder starts with the target lang code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # eng_Latn, "A", "test", EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
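

# Usage sketch (illustrative, not part of the tests): preparing translation
# inputs with the NLLB tokenizer, as exercised above. With the current
# (non-legacy) behaviour the source language code is prefixed.
if __name__ == "__main__":
    tokenizer = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
    )
    batch = tokenizer(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    print(batch.input_ids[0].tolist())  # starts with the eng_Latn code 256047, ends with EOS 2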
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain uncompleted: a process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process; the shortest job in ready_process (target_process) is
    # then run to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    # Printing the result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
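
# Worked example (illustrative): for burst times [2, 5, 3, 7] all arriving at
# t=0, shortest-job-first runs P1(2) -> P3(3) -> P2(5) -> P4(7), so the waiting
# times are [0, 5, 2, 10] and the turnaround times are [2, 10, 5, 17].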
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    import doctest

    doctest.testmod()
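

# Quick check (illustrative): the number of distinct ways to climb n stairs
# taking 1 or 2 steps at a time follows the Fibonacci sequence 1, 2, 3, 5, 8, ...
if __name__ == "__main__":
    assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]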