import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double the in channels because the model predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
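# For reference outside the test harness, a minimal text-to-image sketch using the
# same prior/decoder pair exercised by the slow test above. The checkpoint names come
# from the test itself; the device, dtype, and prompt here are illustrative only.
#
#   import torch
#   from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
#
#   prior = KandinskyV22PriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   decoder = KandinskyV22Pipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#   ).to("cuda")
#   image_embeds, negative_image_embeds = prior("red cat, 4k photo").to_tuple()
#   image = decoder(
#       image_embeds=image_embeds, negative_image_embeds=negative_image_embeds
#   ).images[0]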
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
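# A quick illustration of what the lazy init above buys (a sketch, assuming the
# usual transformers package layout): importing the package stays cheap, because
# `_LazyModule` replaces this module in `sys.modules` and only imports a heavy
# submodule such as `modeling_blip` when one of its names is first accessed.
#
#   import transformers
#   processor_cls = transformers.BlipProcessor              # triggers processing_blip import
#   model_cls = transformers.BlipForConditionalGeneration   # triggers modeling_blip import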
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
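# Hedged usage sketch of how a subclass like the one above is typically wired up.
# Every name on the right-hand side (model, datasets, post-processing function,
# metric function) is a placeholder supplied by the surrounding training script.
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,                     # raw examples used by post-processing
#       post_process_function=post_processing_function,  # maps generations back to answers
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)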
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """
    Stores two signals and performs their circular convolution with the matrix method.
    """

    def __init__(self) -> None:
        # First and second signal, stored as 1-D arrays
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Perform the circular convolution of the first and second signal.

        >>> convolution = CircularConvolution()
        >>> convolution.circular_convolution()
        [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
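# Cross-check sketch (an addition, not part of the original file): circular
# convolution can equivalently be computed with the DFT, since pointwise
# multiplication in the frequency domain equals circular convolution in time.
def circular_convolution_fft(x, h):
    """Circular convolution of two sequences via FFT, rounded to two decimals."""
    n = max(len(x), len(h))
    spectrum_x = np.fft.fft(x, n)
    spectrum_h = np.fft.fft(h, n)
    return np.real(np.fft.ifft(spectrum_x * spectrum_h)).round(2).tolist()


# circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4]) -> [10.0, 10.0, 6.0, 14.0],
# matching the matrix-method result above.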
"""
Feature extractor class for Speech2Text
"""

from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
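# Hedged usage sketch: feeding a 16 kHz mono waveform through the extractor above.
# The random waveform stands in for real audio and is illustrative only; running
# this requires torchaudio to be installed.
#
#   extractor = Speech2TextFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # 1 second at 16 kHz
#   features = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
#   print(features["input_features"].shape)  # (batch, frames, num_mel_bins)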
""" Audio Spectrogram Transformer (AST) model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
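# Hedged sketch: instantiating the config above with a couple of spectrogram-specific
# overrides. The pairing with ASTModel is an assumption about the surrounding package,
# not something defined in this file.
#
#   config = ASTConfig(max_length=512, num_mel_bins=64)
#   from transformers import ASTModel
#   model = ASTModel(config)  # randomly initialized model with this configuration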
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in a list. Returns -1 if the element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list. Returns -1 if not found."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list. Returns -1 if not found."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowercase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
a__: str = 'lm_head'
a__: List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__: Tuple = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__: Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
a__: Any = value
elif weight_type == "weight_g":
a__: Optional[int] = value
elif weight_type == "weight_v":
a__: str = value
elif weight_type == "bias":
a__: Optional[Any] = value
else:
a__: List[Any] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: str = []
a__: Dict = fairseq_model.state_dict()
a__: Any = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
a__: Tuple = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
a__: List[str] = True
else:
for key, mapped_key in MAPPING.items():
a__: Tuple = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__: Optional[Any] = True
if "*" in mapped_key:
a__: List[Any] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
a__: Dict = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__: Tuple = 'weight_g'
elif "weight_v" in name:
a__: str = 'weight_v'
elif "bias" in name:
a__: Optional[int] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__: List[Any] = 'weight'
else:
a__: List[Any] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
a__: str = full_name.split('conv_layers.' )[-1]
a__: List[Any] = name.split('.' )
a__: List[Any] = int(items[0] )
a__: Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
a__: int = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
a__: Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
a__: Optional[int] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
a__: Union[str, Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True ) ->Union[str, Any]:
if config_path is not None:
a__: Optional[int] = UniSpeechConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__: List[str] = UniSpeechConfig()
if is_finetuned:
if dict_path:
a__: Tuple = Dictionary.load_from_json(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__: List[str] = target_dict.pad_index
a__: Dict = target_dict.bos_index
a__: List[str] = target_dict.eos_index
a__: Any = len(target_dict.symbols )
a__: Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
a__: Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
a__: List[Any] = 42
a__: Optional[Any] = 43
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Dict = WavaVecaPhonemeCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_SCREAMING_SNAKE_CASE , )
a__: Union[str, Any] = True if config.feat_extract_norm == 'layer' else False
a__: str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
a__: List[Any] = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
a__: Dict = UniSpeechForCTC(_SCREAMING_SNAKE_CASE )
else:
a__: List[Any] = UniSpeechForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned:
a__ , a__ , a__: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
a__ , a__ , a__: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
a__: str = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_unispeech.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
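# Hedged CLI sketch for the converter above. The paths are placeholders, and the
# script filename is assumed; the flags are exactly those defined by the argparse block.
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --dict_path /path/to/dict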
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase__ = TypeVar('T')
lowercase__ = Union[List[T], Tuple[T, ...]]
lowercase__ = Union[T, List[T], Dict[str, T]]
lowercase__ = Union[str, bytes, os.PathLike]
""" Fast tokenization classes for the CamemBERT model."""

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
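# Hedged usage sketch: the special-token layout produced by the methods above for a
# single sequence and for a pair. "camembert-base" is the checkpoint named in this
# file; the input sentences are illustrative.
#
#   tok = CamembertTokenizerFast.from_pretrained("camembert-base")
#   single = tok("J'aime le camembert")["input_ids"]   # <s> tokens </s>
#   pair = tok("Phrase A", "Phrase B")["input_ids"]    # <s> A </s></s> B </s>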
""" YOLOS model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
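# Hedged sketch: reading the export metadata off the ONNX config above. The
# one-argument OnnxConfig constructor is an assumption about the base class.
#
#   config = YolosConfig()
#   onnx_config = YolosOnnxConfig(config)
#   print(onnx_config.inputs)              # OrderedDict with the pixel_values axes
#   print(onnx_config.default_onnx_opset)  # 12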
import gc
import unittest

import numpy as np
import torch

from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_1, depth_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_2, depth_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
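# Hedged usage sketch of the pipeline exercised above. "Intel/ldm3d" is the
# checkpoint named in the slow tests; the device and prompt are illustrative.
#
#   from diffusers import StableDiffusionLDM3DPipeline
#
#   pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#   output = pipe("a photograph of an astronaut riding a horse")
#   rgb, depth = output.rgb, output.depth  # paired RGB image and depth map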
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
_UpperCamelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : tuple , lowerCAmelCase__ : Path , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int]=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
else:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
@torch.no_grad()
def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
__UpperCAmelCase : Tuple = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCAmelCase : Optional[int] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
__UpperCAmelCase : Dict = """cpu"""
__UpperCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=lowerCAmelCase__ ).to(lowerCAmelCase__ )
__UpperCAmelCase : List[str] = Path(lowerCAmelCase__ )
# TEXT ENCODER
__UpperCAmelCase : Any = pipeline.text_encoder.config.max_position_embeddings
__UpperCAmelCase : str = pipeline.text_encoder.config.hidden_size
__UpperCAmelCase : Optional[Any] = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="""pt""" , )
onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet is > 2GB, so the weights need to be split
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : Tuple = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> str:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : List[str] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : Union[str, Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
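# Note: these dummies let the package import succeed when torch, transformers or
# onnx is missing; the ImportError is only raised once a dummy is actually used,
# e.g. (illustrative):
#
#   OnnxStableDiffusionPipeline.from_pretrained("some/model")  # -> ImportError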
def is_arithmetic_series(series: list) -> bool:
    """
    Check whether a list of numbers forms an arithmetic series.

    >>> is_arithmetic_series([2, 4, 6])
    True
    >>> is_arithmetic_series([3, 6, 12, 24])
    False
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """
    Return the arithmetic mean of a series.

    >>> arithmetic_mean([2, 4, 6])
    4.0
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph given as an adjacency list.
    """
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
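# tarjan() runs in O(V + E): every vertex is pushed onto and popped off the stack
# exactly once. Ad-hoc usage sketch:
#
#   tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)]))  # -> [[2, 1, 0]]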
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCamelCase_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : bool = field(default=A , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
SCREAMING_SNAKE_CASE_ : bool = field(
default=A , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=A , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=A , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = super().to_dict()
for k, v in d.items():
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Dict = v.to_dict()
return d
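# A minimal usage sketch (values are illustrative):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   assert args.to_dict()["generation_num_beams"] == 4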
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''xlm-prophetnet'''
SCREAMING_SNAKE_CASE_ : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = "gelu" ,SCREAMING_SNAKE_CASE__ = 3_05_22 ,SCREAMING_SNAKE_CASE__ = 10_24 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 5_12 ,SCREAMING_SNAKE_CASE__ = 0.0_2 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 2 ,SCREAMING_SNAKE_CASE__ = 32 ,SCREAMING_SNAKE_CASE__ = 1_28 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = 2 ,**SCREAMING_SNAKE_CASE__ ,) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = vocab_size
__SCREAMING_SNAKE_CASE :Tuple = hidden_size
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE :Optional[int] = num_encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = num_encoder_attention_heads
__SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_decoder_layers
__SCREAMING_SNAKE_CASE :Optional[Any] = num_decoder_attention_heads
__SCREAMING_SNAKE_CASE :List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Dict = init_std # Normal(0, this parameter)
__SCREAMING_SNAKE_CASE :List[Any] = activation_function
# parameters for xlmprophetnet
__SCREAMING_SNAKE_CASE :Tuple = ngram
__SCREAMING_SNAKE_CASE :int = num_buckets
__SCREAMING_SNAKE_CASE :Optional[int] = relative_max_distance
__SCREAMING_SNAKE_CASE :Union[str, Any] = disable_ngram_loss
__SCREAMING_SNAKE_CASE :Dict = eps
# 3 Types of Dropout
__SCREAMING_SNAKE_CASE :List[str] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :Union[str, Any] = dropout
__SCREAMING_SNAKE_CASE :int = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,add_cross_attention=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
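# A minimal usage sketch: with the default 12 encoder and 12 decoder layers, the
# derived `num_hidden_layers` property reports their sum.
#
#   config = XLMProphetNetConfig()
#   assert config.num_hidden_layers == 24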
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[int] = []
lowercase__ : Optional[Any] = set({'(', '[', '{'} )
lowercase__ : List[str] = set({')', ']', '}'} )
lowercase__ : Optional[int] = {'{': '}', '[': ']', '(': ')'}
for i in range(len(_lowerCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCAmelCase ) == 0
def a_ ( ):
'''simple docstring'''
lowercase__ : List[Any] = input('Enter sequence of brackets: ' )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , 'is balanced' )
else:
print(_lowerCAmelCase , 'is not balanced' )
if __name__ == "__main__":
main()
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
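# A minimal invocation sketch (paths are illustrative, assuming this script is
# saved as convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py; the
# checkpoint and config come from a ParlAI Blenderbot download):
#
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path ./blenderbot-model.bin \
#       --save_dir ./hf_blenderbot \
#       --hf_config_json ./blenderbot-3b-config.json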
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class snake_case_( snake_case__ ):
__UpperCamelCase = '''lxmert'''
__UpperCamelCase = {}
def __init__( self : Tuple , UpperCamelCase_ : Tuple=3_0_5_2_2 , UpperCamelCase_ : int=7_6_8 , UpperCamelCase_ : Optional[int]=1_2 , UpperCamelCase_ : Union[str, Any]=9_5_0_0 , UpperCamelCase_ : Optional[Any]=1_6_0_0 , UpperCamelCase_ : Tuple=4_0_0 , UpperCamelCase_ : List[str]=3_0_7_2 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[Any]=5_1_2 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[str]=1E-12 , UpperCamelCase_ : str=9 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : Optional[int]=5 , UpperCamelCase_ : List[Any]=2_0_4_8 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : Optional[int]=6.67 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Dict=True , **UpperCamelCase_ : List[str] , ):
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Tuple = hidden_act
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : int = max_position_embeddings
lowerCAmelCase : Optional[Any] = type_vocab_size
lowerCAmelCase : Optional[int] = initializer_range
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : Any = num_qa_labels
lowerCAmelCase : str = num_object_labels
lowerCAmelCase : Optional[int] = num_attr_labels
lowerCAmelCase : Tuple = l_layers
lowerCAmelCase : str = x_layers
lowerCAmelCase : Dict = r_layers
lowerCAmelCase : Optional[Any] = visual_feat_dim
lowerCAmelCase : Tuple = visual_pos_dim
lowerCAmelCase : Optional[Any] = visual_loss_normalizer
lowerCAmelCase : Dict = task_matched
lowerCAmelCase : Optional[int] = task_mask_lm
lowerCAmelCase : str = task_obj_predict
lowerCAmelCase : Any = task_qa
lowerCAmelCase : int = visual_obj_loss
lowerCAmelCase : List[str] = visual_attr_loss
lowerCAmelCase : Any = visual_feat_loss
lowerCAmelCase : List[Any] = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**UpperCAmelCase_ )
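# A minimal usage sketch: the default configuration exposes the per-modality
# layer counts through `num_hidden_layers`.
#
#   config = LxmertConfig()
#   print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}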
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCamelCase_ ):
lowerCAmelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase : Dict = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
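# A minimal usage sketch (model id is illustrative; any unconditional UNet
# checkpoint works, since the scheduler is converted to DDIM on load):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("ddim_sample.png")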
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""


DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent; must set self._lock_file_fd on success."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent; releases the lock and resets self._lock_file_fd."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True, if the object holds the file lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Releases the file lock once the lock counter reaches zero (or *force* is set)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
"""Alias for the lock, which should be used for the current platform."""

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
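# A minimal usage sketch (the lock path is illustrative): FileLock resolves to
# the strongest mechanism available on the current platform.
#
#   lock = FileLock("/tmp/my_resource.lock", timeout=10)
#   with lock:
#       ...  # critical section; raises Timeout if not acquired within 10 seconds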
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1.

    >>> get_distance(0, 0, 50)
    1.0
    >>> get_distance(2, 0, 50)
    0.0
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black & white color-coding that ignores the relative distance: the
    Mandelbrot set is black, everything else is white.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coding that takes the relative distance into account; the Mandelbrot
    set itself is black.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """
    Generate an image of the Mandelbrot set.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
def solution(n: int = 1000) -> int:
    """
    Returns the sum of all the multiples of 3 or 5 below n.

    >>> solution(10)
    23
    >>> solution()
    233168
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
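# A minimal usage sketch (downloads ADE20K label metadata from the Hub on first call):
#
#   config = get_upernet_config("upernet-convnext-tiny")
#   assert config.backbone_config.depths == [3, 3, 9, 3]
#   assert config.num_labels == 150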
def A_ ( A__ ) -> Tuple:
a__ : Optional[int] = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
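

def _rename_key_demo() -> None:
    # Added illustration (hypothetical key, not from a real checkpoint):
    # `rename_key` pops the old entry and reinserts its value under the new name.
    toy = {"backbone.norm0.weight": 0.5}
    rename_key(toy, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
    assert toy == {"backbone.hidden_states_norms.stage1.weight": 0.5}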
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
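
    # Hedged reload sketch (added comment): once pushed, the converted weights
    # can be fetched back like any other transformers checkpoint, e.g.
    #   UperNetForSemanticSegmentation.from_pretrained(f"openmmlab/{args.model_name}")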
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
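

if __name__ == "__main__":
    # Hedged usage sketch (requires network access to fetch the MVP vocab):
    tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    print(tokenizer("Hello world")["input_ids"])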
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at ``node`` to the right (fixes a left-heavy imbalance)."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Mirror of ``right_rotation`` (fixes a right-heavy imbalance)."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if get_height(node.get_left()) - get_height(node.get_right()) == 2:  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if data < left_child.get_data():  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(height)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    # rebalance on the way back up
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for _ in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
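
    # Added sanity check: AVL balancing keeps the height logarithmic
    # (at most ~1.44 * log2(n + 2)), even for fully sorted insertions.
    fresh = AVLtree()
    for i in range(100):
        fresh.insert(i)
    assert fresh.get_height() <= math.ceil(1.44 * math.log2(100 + 2))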
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Holds a DeepSpeed configuration dictionary and answers quick queries such as the ZeRO stage."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
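

if __name__ == "__main__":
    # Minimal query sketch (added illustration; plain-dict input, field names
    # follow the standard DeepSpeed JSON schema):
    ds_config = {
        "zero_optimization": {
            "stage": 3,
            "offload_param": {"device": "cpu"},
        }
    }
    cfg = HfDeepSpeedConfig(ds_config)
    assert cfg.is_zero3() and cfg.is_offload()
    assert cfg.get_value("zero_optimization.offload_param.device") == "cpu"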
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: float, b: float, c: float) -> tuple[complex, complex]:
    """Return the two roots of ``a*x^2 + b*x + c = 0`` (real when the imaginary part vanishes)."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")
if __name__ == "__main__":
main()
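
    # Added cross-check: for x^2 - 3x + 2 = 0 the roots are exactly 1 and 2.
    root_a, root_b = quadratic_roots(a=1, b=-3, c=2)
    assert {root_a, root_b} == {1.0, 2.0}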
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the model is a VQA model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """2-color the graph with a depth-first search; it is bipartite iff no edge is monochromatic."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
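
# Added negative example: an odd cycle (here a triangle) can never be 2-colored.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False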
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
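

if __name__ == "__main__":
    # Added illustration: `attribute_map` aliases the generic config names to
    # the GPT-style ones, so both spellings address the same value.
    config = TrajectoryTransformerConfig(n_layer=2)
    assert config.num_hidden_layers == 2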
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase_ = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value to the [0, 1] range
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: shift to zero mean and unit variance
    return [round((x - mu) / sigma, ndigits) for x in data]
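

if __name__ == "__main__":
    # Added examples: min-max scaling pins the extremes to exactly 0 and 1,
    # and standardized data has (approximately) zero mean.
    assert normalization([2, 4, 6]) == [0.0, 0.5, 1.0]
    assert abs(sum(standardization([2, 4, 6]))) < 1e-9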
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts enumerate the same tensors in the same order
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
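
    # Hedged reload sketch (added comment): converted checkpoints that were
    # pushed can be fetched back, e.g. (repo id assumed):
    #   LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")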
"""
Fast exponentiation: computes a**b with O(log b) multiplications
(divide and conquer, a.k.a. exponentiation by squaring).
"""


def actual_power(a: int, b: int) -> int:
    """Recursively raise ``a`` to the integer power ``b`` (``int(b / 2)`` truncates toward zero, so negative ``b`` also terminates)."""
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Handle negative exponents by inverting the result."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
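
    # Added sanity checks: fast power agrees with the built-in ** operator.
    assert power(2, 10) == 2**10
    assert power(-2, -3) == 1 / (-2) ** 3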
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # lock file names longer than the OS limit must be shortened, keeping the .lock suffix
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
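

def _filelock_usage_sketch(lock_dir: str) -> None:
    # Hedged usage sketch (not one of the original tests): a FileLock
    # serializes access to a shared resource across processes via a .lock
    # file on disk; `acquire` raises Timeout if the lock is already held.
    lock = FileLock(os.path.join(lock_dir, "demo.lock"))
    with lock.acquire(timeout=1):
        pass  # critical section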
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = 0
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ) / 'preprocessor_config.json'
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_lowerCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_lowerCAmelCase , 'w' ) )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ) / 'preprocessor_config.json'
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_lowerCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_lowerCAmelCase , 'w' ) )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ) / 'preprocessor_config.json'
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_lowerCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_lowerCAmelCase , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(_lowerCAmelCase ).to_dict()
config_dict.pop('image_processor_type' )
SCREAMING_SNAKE_CASE_ = CLIPImageProcessor(**_lowerCAmelCase )
# save in new folder
model_config.save_pretrained(_lowerCAmelCase )
config.save_pretrained(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE_ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_lowerCAmelCase , 'w' ) , )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
with self.assertRaisesRegex(
_lowerCAmelCase , 'clip-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained('clip-base' )
def lowerCAmelCase_ ( self : List[Any] ):
with self.assertRaisesRegex(
_lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(_lowerCAmelCase , revision='aaaaaa' )
def lowerCAmelCase_ ( self : str ):
with self.assertRaisesRegex(
_lowerCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def lowerCAmelCase_ ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_lowerCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                    open(processor_tmpfile, 'w'),
                )
                json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(not hasattr(image_processor, 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 225
| 0
|
"""simple docstring"""
def bead_sort(sequence):
    """Bead sort ("gravity sort") for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
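    # Gravity analogy (illustrative, added): each outer pass lets every "bead"
    # fall by at most one rod, so len(sequence) passes always suffice.
    assert bead_sort([0, 3, 0, 2]) == [0, 0, 2, 3]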
| 12
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 12
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the BERT config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
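    # Example invocation (illustrative paths, using the flags declared above):
    #   python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path bert_model.ckpt \
    #       --bert_config_file bert_config.json \
    #       --pytorch_dump_path pytorch_model.bin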
| 150
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None
class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Test cases for the CircularLinkedList class."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150
| 1
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward(self):
pass
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
    def test_feed_forward_chunking(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowerCAmelCase ( self ) -> Optional[int]:
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def test_model_from_pretrained(self):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_to_base(self):
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict['pixel_values'].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 358
|
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
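    # Equivalent formulation (illustrative, added): OR is "any input is 1".
    assert all(or_gate(a, b) == int(any((a, b))) for a in (0, 1) for b in (0, 1))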
| 341
| 0
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 51
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 116
| 0
|
'''simple docstring'''
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += '0' * (6 - len(binary_stream) % 6)
    else:
        padding = b''

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        ''.join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')

    padding = encoded_data.count('=')

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
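    # Illustrative round-trip check using the functions defined above:
    assert base64_encode(b'Hello') == b'SGVsbG8='
    assert base64_decode('SGVsbG8=') == b'Hello'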
| 251
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = """nezha"""

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 251
| 1
|
def actual_power(a: int, b: int):
    """
    Divide-and-conquer exponentiation: computes a**b with O(log b)
    multiplications when b is a non-negative integer.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
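    # Quick sanity checks (illustrative, added). Note that int(b / 2) truncates
    # toward zero, so the recursion in actual_power also terminates for negative b.
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125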
| 5
|
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Count composites below max_number with exactly two (not necessarily
    distinct) prime factors, using two pointers over the sorted prime list.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
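    # Illustrative check (computed by hand, added): the ten semiprimes below 30
    # are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
    assert solution(30) == 10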
| 5
| 1
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of a function in the given variable, starting at starting_point."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
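# Note (added): the `multiplicity` factor above implements the modified Newton
# step x_{n+1} = x_n - m * f(x_n) / f'(x_n), which restores quadratic
# convergence at a root of known multiplicity m (for a simple root, m = 1).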
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 293
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293
| 1
|
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort") for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("""Sequence must be list of non-negative integers""")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 12
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
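# For example (illustrative, added): with the default scale_factor=8, a 768x768
# request maps to a 96x96 latent grid (decoded back to 768x768), while 700x700
# rounds up to an 88x88 latent grid, i.e. 704x704 pixels after decoding.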
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")

        device = torch.device(F'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")

        device = torch.device(F'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to("""cpu""", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, """_hf_hook""")
                and hasattr(module._hf_hook, """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, """variance_type""")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["""sample"""]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 12
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1_024,
    'moussaKam/barthez': 1_024,
    'moussaKam/barthez-orangesum-title': 1_024,
}

SPIECE_UNDERLINE = '▁'
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
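# Illustration with hypothetical ids (cls=0, sep=2): token_ids_a=[5, 6] and
# token_ids_b=[7] become [0, 5, 6, 2, 2, 7, 2], i.e. "<s> A </s></s> B </s>",
# the same sentence-pair format CamemBERT uses.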
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [self.sep_token_id]
UpperCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : Any = self.sp_model.PieceToId(lowercase_ )
return spm_id if spm_id else self.unk_token_id
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Optional[Any] = ""
UpperCAmelCase_ : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_ ) + token
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : List[str] = []
else:
current_sub_tokens.append(lowercase_ )
UpperCAmelCase_ : List[Any] = False
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def __getstate__( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : Dict = None
return state
def __setstate__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : int = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , "wb" ) as fi:
UpperCAmelCase_ : int = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
| 23
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ):
UpperCAmelCase_ : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
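# Worked check of the resizer above (upstream name downscale_height_and_width,
# obfuscated here as `__a`), with the usual movq scale factor of 8:
#   512 // 8**2 = 8 cells, 512 % 64 == 0  -> 8 * 8 = 64
#   513 // 8**2 = 8 cells, 513 % 64 != 0  -> round up to 9 -> 9 * 8 = 72
# so a (512, 512) request yields a (64, 64) latent grid and (513, 512) yields (72, 64).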
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if latents is None:
UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase_ : str = latents.to(lowercase_ )
UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma
return latents
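# Note: a user-supplied `latents` tensor must match `shape` exactly; otherwise fresh
# noise is drawn with `randn_tensor`, and in both cases the result is scaled by the
# scheduler's `init_noise_sigma` before denoising starts.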
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase_ : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
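# In short: once accelerate hooks have offloaded submodules, the `execution_device`
# recorded on the first hooked unet module wins; without hooks we simply report
# `self.device`.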
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : List[Any] = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : List[str] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier-free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds}
UpperCAmelCase_ : Optional[Any] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 )
UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5
UpperCAmelCase_ : int = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 23
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "biogpt"
def __init__( self : Any , _UpperCamelCase : Dict=4_2_3_8_4 , _UpperCamelCase : Dict=1_0_2_4 , _UpperCamelCase : Optional[Any]=2_4 , _UpperCamelCase : Union[str, Any]=1_6 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Union[str, Any]=1_0_2_4 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : str=1e-12 , _UpperCamelCase : List[str]=True , _UpperCamelCase : str=True , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[Any]=0.0 , _UpperCamelCase : int=1 , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Any=2 , **_UpperCamelCase : List[str] , ) ->List[Any]:
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = scale_embedding
snake_case_ = use_cache
snake_case_ = layerdrop
snake_case_ = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
| 8
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = "deberta-v2"
def __init__(self , UpperCAmelCase=128100 , UpperCAmelCase=1536 , UpperCAmelCase=24 , UpperCAmelCase=24 , UpperCAmelCase=6144 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-7 , UpperCAmelCase=False , UpperCAmelCase=-1 , UpperCAmelCase=0 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=0 , UpperCAmelCase="gelu" , **UpperCAmelCase , ) -> List[str]:
super().__init__(**UpperCAmelCase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = relative_attention
_snake_case = max_relative_positions
_snake_case = pad_token_id
_snake_case = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase ) == str:
_snake_case = [x.strip() for x in pos_att_type.lower().split("""|""" )]
_snake_case = pos_att_type
_snake_case = vocab_size
_snake_case = layer_norm_eps
_snake_case = kwargs.get("""pooler_hidden_size""" , UpperCAmelCase )
_snake_case = pooler_dropout
_snake_case = pooler_hidden_act
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
@property
def lowercase (self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def lowercase (self ) -> int:
return 12
def lowercase (self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 3 , UpperCAmelCase = 40 , UpperCAmelCase = 40 , UpperCAmelCase = None , ) -> Mapping[str, Any]:
_snake_case = super().generate_dummy_inputs(preprocessor=UpperCAmelCase , framework=UpperCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 341
| 0
|
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float ):
return price * (1 + tax_rate)
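# e.g. price_plus_tax(100, 0.25) == 100 * 1.25 == 125.0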
if __name__ == "__main__":
print(f"""{price_plus_tax(1_00, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 357
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :List[Any] ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ , a__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ = controlnet_params
a__ = 'bird'
a__ = jax.device_count()
a__ = pipe.prepare_text_inputs([prompts] * num_samples )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
a__ = pipe.prepare_image_inputs([canny_image] * num_samples )
a__ = jax.random.PRNGKey(0 )
a__ = jax.random.split(__snake_case ,jax.device_count() )
a__ = replicate(__snake_case )
a__ = shard(__snake_case )
a__ = shard(__snake_case )
a__ = pipe(
prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a__ = images[0, 2_53:2_56, 2_53:2_56, -1]
a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a__ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ , a__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ = controlnet_params
a__ = 'Chef in the kitchen'
a__ = jax.device_count()
a__ = pipe.prepare_text_inputs([prompts] * num_samples )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
a__ = pipe.prepare_image_inputs([pose_image] * num_samples )
a__ = jax.random.PRNGKey(0 )
a__ = jax.random.split(__snake_case ,jax.device_count() )
a__ = replicate(__snake_case )
a__ = shard(__snake_case )
a__ = shard(__snake_case )
a__ = pipe(
prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a__ = images[0, 2_53:2_56, 2_53:2_56, -1]
a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a__ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 109
| 0
|
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
'''simple docstring'''
def __init__( self, A, A=2, A=8, A=True, A=True, A=True, A=True, A=99, A=16, A=5, A=2, A=36, A="gelu", A=0.0, A=0.0, A=512, A=16, A=2, A=0.02, A=3, A=4, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = num_labels
SCREAMING_SNAKE_CASE : Optional[int] = num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size], self.num_choices )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
SCREAMING_SNAKE_CASE : List[Any] = 300
return config
def UpperCamelCase_ ( self ):
'''simple docstring'''
( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MraModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(A, attention_mask=A, token_type_ids=A )
SCREAMING_SNAKE_CASE : int = model(A, token_type_ids=A )
SCREAMING_SNAKE_CASE : Dict = model(A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A, A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : str = MraModel(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
A, attention_mask=A, token_type_ids=A, encoder_hidden_states=A, encoder_attention_mask=A, )
SCREAMING_SNAKE_CASE : int = model(
A, attention_mask=A, token_type_ids=A, encoder_hidden_states=A, )
SCREAMING_SNAKE_CASE : Optional[Any] = model(A, attention_mask=A, token_type_ids=A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = MraForMaskedLM(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(A, attention_mask=A, token_type_ids=A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MraForQuestionAnswering(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = MraForSequenceClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(A, attention_mask=A, token_type_ids=A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = MraForTokenClassification(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(A, attention_mask=A, token_type_ids=A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
SCREAMING_SNAKE_CASE : List[str] = MraForMultipleChoice(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE : int = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
A, attention_mask=A, token_type_ids=A, labels=A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
A : Any = False
A : str = False
A : Any = False
A : Any = False
A : Any = ()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MraModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : List[str] = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = MraModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason='MRA does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return
@require_torch
class _a ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
SCREAMING_SNAKE_CASE : str = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(A )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape, A )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
SCREAMING_SNAKE_CASE : int = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(A )[0]
SCREAMING_SNAKE_CASE : List[Any] = 50_265
SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape, A )
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
SCREAMING_SNAKE_CASE : List[str] = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(A )[0]
SCREAMING_SNAKE_CASE : int = 50_265
SCREAMING_SNAKE_CASE : str = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], A, atol=1E-4 ) )
| 251
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = '''openai-gpt'''
A : str = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self, A=40_478, A=512, A=768, A=12, A=12, A="gelu", A=0.1, A=0.1, A=0.1, A=1E-5, A=0.02, A="cls_index", A=True, A=None, A=True, A=0.1, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = n_positions
SCREAMING_SNAKE_CASE : List[str] = n_embd
SCREAMING_SNAKE_CASE : Optional[Any] = n_layer
SCREAMING_SNAKE_CASE : Optional[Any] = n_head
SCREAMING_SNAKE_CASE : str = afn
SCREAMING_SNAKE_CASE : List[str] = resid_pdrop
SCREAMING_SNAKE_CASE : int = embd_pdrop
SCREAMING_SNAKE_CASE : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = summary_type
SCREAMING_SNAKE_CASE : Tuple = summary_use_proj
SCREAMING_SNAKE_CASE : Dict = summary_activation
SCREAMING_SNAKE_CASE : Tuple = summary_first_dropout
SCREAMING_SNAKE_CASE : List[str] = summary_proj_to_labels
super().__init__(**A )
| 251
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __snake_case (A_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __snake_case (A_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 360
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = None
if token is not None:
_lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_lowerCAmelCase : Tuple = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
_lowerCAmelCase : str = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json()
_lowerCAmelCase : Optional[Any] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_lowerCAmelCase : List[str] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase_ ):
_lowerCAmelCase : int = requests.get(url + F"&page={i + 2}" , headers=UpperCamelCase_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any]=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = None
if token is not None:
_lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_lowerCAmelCase : Optional[int] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
_lowerCAmelCase : Optional[int] = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json()
_lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_lowerCAmelCase : List[str] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase_ ):
_lowerCAmelCase : List[str] = requests.get(url + F"&page={i + 2}" , headers=UpperCamelCase_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
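# Note: the GitHub REST API caps `per_page` at 100, so both fetchers above read the
# first page, then loop over math.ceil((total_count - 100) / 100) follow-up pages via
# the `&page=N` query parameter.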
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ):
'''simple docstring'''
_lowerCAmelCase : str = None
if token is not None:
_lowerCAmelCase : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_lowerCAmelCase : List[str] = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ , allow_redirects=UpperCamelCase_ )
_lowerCAmelCase : List[str] = result.headers["""Location"""]
_lowerCAmelCase : List[Any] = requests.get(UpperCamelCase_ , allow_redirects=UpperCamelCase_ )
_lowerCAmelCase : int = os.path.join(UpperCamelCase_ , F"{artifact_name}.zip" )
with open(UpperCamelCase_ , """wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase (UpperCamelCase_ : int , UpperCamelCase_ : Optional[int]=None ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Any = []
_lowerCAmelCase : Union[str, Any] = None
with zipfile.ZipFile(UpperCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCamelCase_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCamelCase_ ) as f:
for line in f:
_lowerCAmelCase : List[str] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_lowerCAmelCase : Union[str, Any] = line[: line.index(""": """ )]
_lowerCAmelCase : Union[str, Any] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_lowerCAmelCase : Tuple = line[len("""FAILED """ ) :]
failed_tests.append(UpperCamelCase_ )
elif filename == "job_name.txt":
_lowerCAmelCase : str = line
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCamelCase_ )} for `errors` "
F"and {len(UpperCamelCase_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
_lowerCAmelCase : int = None
if job_name and job_links:
_lowerCAmelCase : Optional[int] = job_links.get(UpperCamelCase_ , UpperCamelCase_ )
# A list with elements of the form (line of error, error, failed test)
_lowerCAmelCase : Tuple = [x + [y] + [job_link] for x, y in zip(UpperCamelCase_ , UpperCamelCase_ )]
return result
def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : List[Any] = [os.path.join(UpperCamelCase_ , UpperCamelCase_ ) for p in os.listdir(UpperCamelCase_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCamelCase_ , job_links=UpperCamelCase_ ) )
return errors
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Counter()
counter.update([x[1] for x in logs] )
_lowerCAmelCase : Dict = counter.most_common()
_lowerCAmelCase : Dict = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_lowerCAmelCase : Union[str, Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_lowerCAmelCase : int = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=UpperCamelCase_ ) )
return r
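# Result shape, per the comprehension above:
# {error: {"count": n, "failed_tests": [(failed_test, error_line), ...]}},
# with entries ordered by descending count.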
def _UpperCAmelCase (UpperCamelCase_ : Tuple ):
'''simple docstring'''
_lowerCAmelCase : List[str] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_lowerCAmelCase : Optional[Any] = test.split("""/""" )[2]
else:
_lowerCAmelCase : Union[str, Any] = None
return test
def _UpperCAmelCase (UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict=None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
_lowerCAmelCase : List[str] = [x for x in logs if x[2] is not None]
_lowerCAmelCase : int = {x[2] for x in logs}
_lowerCAmelCase : str = {}
for test in tests:
_lowerCAmelCase : Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_lowerCAmelCase : List[Any] = counter.most_common()
_lowerCAmelCase : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_lowerCAmelCase : List[str] = sum(error_counts.values() )
if n_errors > 0:
_lowerCAmelCase : int = {"""count""": n_errors, """errors""": error_counts}
_lowerCAmelCase : Dict = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=UpperCamelCase_ ) )
return r
def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = """| no. | error | status |"""
_lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
_lowerCAmelCase : str = [header, sep]
for error in reduced_by_error:
_lowerCAmelCase : Optional[Any] = reduced_by_error[error]["""count"""]
_lowerCAmelCase : int = F"| {count} | {error[:100]} | |"
lines.append(UpperCamelCase_ )
return "\n".join(UpperCamelCase_ )
def _UpperCAmelCase (UpperCamelCase_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
_lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
_lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
_lowerCAmelCase : Union[str, Any] = reduced_by_model[model]["""count"""]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = list(reduced_by_model[model]["""errors"""].items() )[0]
_lowerCAmelCase : str = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(UpperCamelCase_ )
return "\n".join(UpperCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_lowerCamelCase : Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCamelCase : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
_lowerCamelCase : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCamelCase : Optional[Any] = k.find(" / ")
_lowerCamelCase : Tuple = k[index + len(" / ") :]
_lowerCamelCase : List[Any] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCamelCase : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCamelCase : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCamelCase : Union[str, Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : str = reduce_by_error(errors)
_lowerCamelCase : Tuple = reduce_by_model(errors)
_lowerCamelCase : List[str] = make_github_table(reduced_by_error)
_lowerCamelCase : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 159
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = """vit_msn"""
def __init__( self , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-06 , __UpperCAmelCase=2_2_4 , __UpperCAmelCase=1_6 , __UpperCAmelCase=3 , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :Any = hidden_size
lowerCAmelCase__ :Tuple = num_hidden_layers
lowerCAmelCase__ :str = num_attention_heads
lowerCAmelCase__ :Optional[Any] = intermediate_size
lowerCAmelCase__ :Dict = hidden_act
lowerCAmelCase__ :str = hidden_dropout_prob
lowerCAmelCase__ :str = attention_probs_dropout_prob
lowerCAmelCase__ :Optional[int] = initializer_range
lowerCAmelCase__ :Dict = layer_norm_eps
lowerCAmelCase__ :List[Any] = image_size
lowerCAmelCase__ :Tuple = patch_size
lowerCAmelCase__ :Any = num_channels
lowerCAmelCase__ :str = qkv_bias
| 293
|
"""simple docstring"""
from __future__ import annotations
__A = tuple[int, int, int]
__A = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
__A = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
__A = """FOBHMDKEXQNRAULPGSJVTYICZW"""
__A = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
__A = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
__A = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
__A = """SGLCPQWZHKXAREONTFBVIYJUDM"""
__A = """HVSICLTYKQUBXDWAJZOMFGPREN"""
__A = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
__A = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
__A = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
"""simple docstring"""
if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3:
lowerCAmelCase__ :Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(_SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = rotpos
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Tuple = F"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Union[str, Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_SCREAMING_SNAKE_CASE )
# Validates string and returns dict
lowerCAmelCase__ :int = _plugboard(_SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def __A (_SCREAMING_SNAKE_CASE ) ->dict[str, str]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = F"Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})"
raise TypeError(_SCREAMING_SNAKE_CASE )
elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0:
lowerCAmelCase__ :str = F"Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})"
raise Exception(_SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
pbstring = pbstring.replace(' ' , '' )  # drop spaces so the pair scan below sees only symbols
# Checks if all characters are unique
lowerCAmelCase__ :Any = set()
for i in pbstring:
if i not in abc:
lowerCAmelCase__ :Any = F"'{i}' not in list of symbols"
raise Exception(_SCREAMING_SNAKE_CASE )
elif i in tmppbl:
lowerCAmelCase__ :Dict = F"Duplicate symbol ({i})"
raise Exception(_SCREAMING_SNAKE_CASE )
else:
tmppbl.add(_SCREAMING_SNAKE_CASE )
del tmppbl
# Created the dictionary
lowerCAmelCase__ :List[Any] = {}
for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ):
lowerCAmelCase__ :Optional[int] = pbstring[j + 1]
lowerCAmelCase__ :Union[str, Any] = pbstring[j]
return pb
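# Intended mapping of the plugboard builder above (call-site name `_plugboard`):
# "PICTURES" pairs adjacent letters symmetrically, giving
# {"P": "I", "I": "P", "C": "T", "T": "C", "U": "R", "R": "U", "E": "S", "S": "E"}.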
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Tuple = text.upper()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _validator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = rotor_position
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCAmelCase__ :Dict = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCAmelCase__ :Dict = plugboard[symbol]
# rotor ra --------------------------
lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :str = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :int = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
lowerCAmelCase__ :str = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :Optional[Any] = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCAmelCase__ :str = reflector[symbol]
# 2nd rotors
lowerCAmelCase__ :Tuple = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
lowerCAmelCase__ :Optional[int] = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
lowerCAmelCase__ :Any = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCAmelCase__ :Union[str, Any] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :List[Any] = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = """This is my Python script that emulates the Enigma machine from WWII."""
__A = (1, 1, 1)
__A = """pictures"""
__A = (rotora, rotora, rotora)
__A = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 293
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
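# A minimal, self-contained sketch of the lazy-import pattern used above
# (an editorial example, not part of transformers): a module-level
# __getattr__ (PEP 562) defers the real import until an attribute is
# first accessed.
#
#     # lazy_pkg/__init__.py
#     import importlib
#
#     _import_structure = {"heavy_submodule": ["HeavyClass"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _import_structure.items():
#             if name in symbols:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")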
| 160
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
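# A small sketch of the shim's effect (an editorial example with hypothetical
# test code; assumes a diffusers release where this module still exists): the
# first import of diffusers.pipeline_utils succeeds but emits a FutureWarning.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         from diffusers.pipeline_utils import DiffusionPipeline  # noqa: F401
#     assert any(issubclass(w.category, FutureWarning) for w in caught)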
| 160
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) from x0 to x_end with the forward
    (explicit) Euler method, starting from y(x0) = y0."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
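# A short usage sketch (an editorial addition): dy/dx = y with y(0) = 1 has
# the exact solution e^x, and the global error of forward Euler is O(step_size).
#
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)
#     print(abs(y[-1] - np.exp(5.0)))  # shrinks roughly linearly with step_size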
| 23
|
'''simple docstring'''
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Counts the reflections of the beam inside the ellipse 4x^2 + y^2 = 100
    before it exits through the gap at the top (-0.01 <= x <= 0.01, y > 0)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
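# A quick sanity sketch (an editorial addition): every point returned by
# next_point() must lie on the ellipse 4x^2 + y^2 = 100 by construction.
#
#     x, y, _ = next_point(1.4, -9.6, (10.1 + 9.6) / (0.0 - 1.4))
#     assert isclose(4 * x * x + y * y, 100.0)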
| 23
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
_UpperCAmelCase : List[Any] = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(
        self, save_directory: str, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
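# A short usage sketch (an editorial example; assumes network access to the
# albert-base-v2 checkpoint): single sequences become [CLS] A [SEP], pairs
# become [CLS] A [SEP] B [SEP] with token_type_ids 0 / 1 per segment.
#
#     tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     enc = tok("first segment", "second segment")
#     assert enc["input_ids"][0] == tok.cls_token_id
#     assert enc["input_ids"].count(tok.sep_token_id) == 2
#     assert set(enc["token_type_ids"]) == {0, 1}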
| 45
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 1
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
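# Note on the expected values above: logits_per_image has shape
# (num_images, num_texts) and logits_per_text is its transpose, so one image
# scored against the two Italian captions yields a single row of two scores.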
| 21
|
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
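# A short usage sketch (an editorial example with hypothetical names and a
# far-future version string): popping a deprecated keyword argument while
# warning the caller.
#
#     def resize(image, *, size=None, **kwargs):
#         old_size = deprecate(
#             "target_size", "9.9.9", "Use `size` instead.", take_from=kwargs
#         )
#         if old_size is not None:
#             size = old_size
#         ...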
| 109
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 369
|
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses should override this method
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
UpperCamelCase = [0]
UpperCamelCase = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCamelCase = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCamelCase = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
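# For this sample network the only augmenting path is 0 -> 1 -> 2 -> 3 with
# capacities (7, 6, 8), so the script should print: maximum flow is 6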
| 334
| 0
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # mean-pool the token embeddings over the unmasked positions, then
        # project into the image embedding space
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
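# A tiny forward-pass sketch (an editorial example with made-up small sizes;
# transformerDimSize must match the XLM-R hidden_size for the projection):
#
#     config = MCLIPConfig(
#         transformerDimSize=32, imageDimSize=16,
#         vocab_size=100, hidden_size=32, num_hidden_layers=2,
#         num_attention_heads=2, intermediate_size=64,
#     )
#     model = MultilingualCLIP(config)
#     ids = torch.randint(0, 100, (2, 7))
#     mask = torch.ones_like(ids)
#     projected, embs = model(ids, mask)
#     assert projected.shape == (2, 16) and embs.shape == (2, 32)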
| 7
|
def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to
    contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
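# Quick check (an editorial addition): the first Fibonacci number with
# 3 digits is F(12) = 144, so solution(3) == 12.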
| 159
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
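# For example (an editorial note), rename_key maps:
#   "encoder.patch_embed.proj.weight" -> "swin.embeddings.patch_embeddings.projection.weight"
#   "encoder.norm.bias"               -> "swin.layernorm.bias"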
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 214
|
'''simple docstring'''
def print_max_activities(start, finish):
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
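# With the sample data above the greedy choice keeps activities 0, 1, 3 and 4,
# so the script prints: 0,1,3,4,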
| 214
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__a : str = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__a : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 160
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"""Copy vocab file to {out_vocab_file}""")
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_b + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
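# Usage sketch (added for illustration; "t5-small" is just an example checkpoint,
# any T5 checkpoint with the default extra_ids=100 behaves the same):
def _demo_sentinel_tokens():
    tok = TaTokenizerFast.from_pretrained("t5-small")
    print(sorted(tok.get_sentinel_tokens())[:2])  # e.g. ['<extra_id_0>', '<extra_id_1>']
    print(tok.get_sentinel_token_ids()[:2])       # the matching vocabulary ids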
| 160
| 1
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 54
|
"""simple docstring"""
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
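# Usage sketch (added for illustration, not part of the original file):
# integrate dy/dx = y with y(0) = 1 up to x = 5; the last entry should be
# close to e**5 ~ 148.41.
def _demo_runge_kutta():
    y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)
    print(f"y(5) ~ {y[-1]:.2f} (exact e**5 = {np.exp(5):.2f})")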
| 54
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = 'focalnet'

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 45
|
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
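# Usage sketch (added for illustration, not part of the original file):
def _demo_arithmetic_series():
    print(is_arithmetic_series([2, 4, 6]))  # True: constant difference of 2
    print(is_arithmetic_series([2, 4, 7]))  # False: the difference changes
    print(arithmetic_mean([2, 4, 6]))       # 4.0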
| 45
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
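# Worked example (added for illustration): with the default scale_factor=8, a
# requested 768x768 image maps to a 96x96 latent grid (768 // 8**2 == 12 with no
# remainder, and 12 * 8 == 96); sizes that are not exact multiples are rounded
# up, so the decoded image is never smaller than requested.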
class KandinskyVaaPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler, movq):
        super().__init__()

        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 351
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    '''simple docstring'''
    li = s.rsplit(old, occurrence)
    return new.join(li)
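# Worked example (added for illustration): only the last occurrence is replaced,
# which keeps keys containing ".w" in the middle intact:
# rreplace("blocks.w.0.w", ".w", ".weight", 1) -> "blocks.w.0.weight"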
def count_parameters(state_dict):
    '''simple docstring'''
    # encoder.embeddings are double copied in the original FLAVA codebase, hence skipped in the checksum
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    '''simple docstring'''
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    '''simple docstring'''
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 243
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    '''simple docstring'''

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    '''simple docstring'''

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation)
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE)
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"""Unable to understand extra arguments {args}""")
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
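# Usage sketch (added for illustration; the default model picked by the pipeline
# factory and the exact scores will vary, so treat the output as indicative):
def _demo_zero_shot():
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification")
    result = classifier(
        "The new GPU doubles training throughput.",
        candidate_labels=["technology", "cooking", "politics"],
    )
    print(result["labels"][0])  # "technology" is expected to rank first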
| 37
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
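# Usage sketch (added for illustration; the override values are arbitrary small
# numbers, while the defaults mirror the facebook/nllb-moe-54b checkpoint):
# config = NllbMoeConfig(d_model=256, encoder_layers=2, decoder_layers=2, num_experts=4)
# config.num_hidden_layers  # -> 2, mirrored from encoder_layers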
| 334
| 0
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 356
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 61
| 0
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    '''simple docstring'''
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(n) for n in a]))
if __name__ == "__main__":
main()
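# Usage sketch (added for illustration): pigeonhole sort runs in O(n + range)
# time and sorts the list in place, so it shines when the value range is small
# relative to the number of elements.
# >>> data = [8, 3, 2, 7, 4, 6, 8]
# >>> pigeonhole_sort(data)
# >>> data
# [2, 3, 4, 6, 7, 8, 8]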
| 214
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self):
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next)}, has prev: {bool(self.prev)}"""
)
class DoubleLinkedList(Generic[T, U]):
    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)
    def add(self, node):
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable mapping decorated functions to their LRUCache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self):
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , a):
return key in self.cache
    def get(self, key):
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key, value):
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
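# Usage sketch (added for illustration, not part of the original module): the
# class-level decorator memoizes a function on its first positional argument.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


def _demo_lru_cache():
    print(fib(20))           # 6765, computed with memoized recursion
    print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)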
| 214
| 1
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    '''simple docstring'''

    def get_tokenizer(self, mname):
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        '''simple docstring'''
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
])
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        '''simple docstring'''
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
| 119
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'])

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 119
| 1
|
"""simple docstring"""
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    '''simple docstring'''
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(F"{solution() = }")
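# Verification sketch (added for illustration): a direct enumeration of square
# laminae, usable to cross-check solution() on small limits. A lamina with outer
# width o and hole width h (same parity, h >= 1, o - h >= 2) uses o**2 - h**2 tiles.
def brute_force(limit: int = 1000) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # the thinnest lamina of width `outer` still fits
        hole = outer - 2
        while hole >= 1 and outer**2 - hole**2 <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count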
| 54
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085",  # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
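# A minimal sketch of the fixture above used outside the test harness, assuming
# the same tiny vocab/merges files have been written to disk:
#
#   tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   tokenizer.tokenize("lower newer")  # -> ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]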
def compute_ap(l):  # noqa: E741
    """Print every articulation point of the undirected graph given as adjacency list `l`."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
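# Optional cross-check with networkx (an extra dependency, not used above); for
# the sample graph the articulation points printed by compute_ap(data) are 2, 3 and 5:
#
#   import networkx as nx
#   assert sorted(nx.articulation_points(nx.Graph(data))) == [2, 3, 5]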
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Hill climbing with a temperature schedule: worse neighbors can be accepted."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while next_state is None and neighbors:  # until we find a neighbor we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding the minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
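# A standalone illustration of the Metropolis acceptance rule used above: a
# worsening move (change < 0 after the find_max sign flip) survives with
# probability e^(change / T), so a hot search accepts almost anything and a
# cold one almost nothing.
if __name__ == "__main__":
    for temperature in (100.0, 10.0, 1.0):
        probability = math.e ** (-5.0 / temperature)  # a move that worsens the score by 5
        print(f"T={temperature:>5}: accept a -5 move with probability {probability:.3f}")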
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_a = logging.get_logger(__name__)
class _lowerCAmelCase(BaseImageProcessor):
    r"""Image processor implementing the standard resize / center-crop / rescale / normalize pipeline."""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
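# A minimal usage sketch (a random array stands in for a real photo; the
# expected shape follows from the 224x224 center crop configured above):
#
#   processor = _lowerCAmelCase()
#   dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor(images=dummy, return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])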
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel-spectrogram images and converts them back to audio."""

    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Return the default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising loop to recover the noise that generates `images` (DDIM schedulers only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
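# A hedged usage sketch (the checkpoint id is illustrative; any audio-diffusion
# checkpoint with matching unet/mel/scheduler components would do):
#
#   pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to("cuda")
#   output = pipe(batch_size=1, generator=torch.Generator("cuda").manual_seed(42))
#   image, audio = output.images[0], output.audios[0]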
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip('''Swin does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
@unittest.skip('''Swin does not support feedforward chunking''' )
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
    def test_model_from_pretrained(self):
        pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def test_save_load_fast_init_from_base(self):
        pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
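# A minimal sketch of driving the backbone directly, using the same tiny
# hyperparameters as the model tester above (shapes are illustrative):
#
#   config = MaskFormerSwinConfig(embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4],
#                                 out_features=["stage1", "stage2", "stage3"])
#   backbone = MaskFormerSwinBackbone(config).eval()
#   feature_maps = backbone(torch.randn(1, 3, 32, 32)).feature_maps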
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
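# A quick sanity check of the defaults defined above (attribute_map routes
# hidden_size to embed_dim):
#
#   config = Swin2SRConfig()
#   assert config.num_layers == 6 and config.hidden_size == 180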
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
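# The three entry points exercised above, in sketch form (same pipeline object):
#
#   pipe.dual_guided(prompt, image=img, text_to_image_strength=0.75, ...)  # text + image guidance
#   pipe.text_to_image(prompt, ...)                                        # text guidance only
#   pipe.image_variation(img, ...)                                         # image guidance only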
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
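# Sketch of the special-token layout produced by the helpers above, for two
# already-tokenized id sequences A and B:
#
#   build_inputs_with_special_tokens(A)          -> [CLS] A [SEP]
#   build_inputs_with_special_tokens(A, B)       -> [CLS] A [SEP] B [SEP]
#   create_token_type_ids_from_sequences(A, B)   -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"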
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase_ : Dict = {"""input_ids""": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = "facebook/mbart-large-50-one-to-many-mmt"
A__ : Tuple = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
A__ : int = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
A__ : Optional[int] = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] ,250038 )
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[250053, 250001] )
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
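# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch of the translation flow these tests
# exercise, based on the documented MBart-50 API (not part of the test file):
#
#     from transformers import MBart50TokenizerFast, MBartForConditionalGeneration
#
#     model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
#     tokenizer = MBart50TokenizerFast.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX"
#     )
#     inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# ---------------------------------------------------------------------------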
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    '''simple docstring'''
    # Classic Fisher-Yates: walk from the last index down, swapping each
    # position with a uniformly random index at or below it. This yields an
    # unbiased permutation, unlike swapping two arbitrary random indices.
    for a in range(len(data) - 1, 0, -1):
        b = random.randint(0, a)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
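# Reproducibility note (editor's sketch): seeding the module-level RNG first
# makes the shuffle deterministic across runs, e.g.:
#     random.seed(42)
#     print(fisher_yates_shuffle(list(range(10))))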
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string_a: str, string_b: str) -> str | Literal[False]:
    # Two implicants merge only if they differ in exactly one position; the
    # differing position is replaced by '_'.
    list_a = list(string_a)
    list_b = list(string_b)
    count = 0
    for i in range(len(list_a)):
        if list_a[i] != list_b[i]:
            count += 1
            list_a[i] = "_"
    if count > 1:
        return False
    return "".join(list_a)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # The pair merges; mark both terms as covered and keep the
                    # merged implicant for the next round.
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                # Terms that never merged are prime implicants.
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string_a: str, string_b: str, count: int) -> bool:
    list_a = list(string_a)
    list_b = list(string_b)
    count_n = 0
    for i in range(len(list_a)):
        if list_a[i] != list_b[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pass: a minterm covered by exactly one implicant makes that
    # implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy pass: repeatedly take the implicant covering the most remaining
    # minterms until everything is covered.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    # Minterms must be integers: float input would emit '1.0'/'0.0' digits in
    # decimal_to_binary and corrupt the binary strings.
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
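# Worked example (editor's sketch) for the pipeline above, with 3 variables
# and minterms {1, 3, 7}; outputs are illustrative since set() ordering varies:
#     binary = decimal_to_binary(3, [1, 3, 7])                  # ['001', '011', '111']
#     prime_implicants = check(binary)                          # e.g. ['0_1', '_11']
#     chart = prime_implicant_chart(prime_implicants, binary)
#     selection(chart, prime_implicants)                        # both implicants are essential here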
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
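# A minimal variant (editor's addition): list.pop(0) above is O(n) per pop;
# collections.deque gives an O(1) FIFO queue for the same traversal.
from collections import deque


def breadth_first_search_with_deque(graph: dict[str, list[str]], source_vertex: str) -> dict[str, str | None]:
    parent: dict[str, str | None] = {source_vertex: None}
    queue = deque([source_vertex])
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        for adjacent_vertex in graph[vertex]:
            if adjacent_vertex not in parent:
                parent[adjacent_vertex] = vertex
                queue.append(adjacent_vertex)
    return parent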
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}.")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}")
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
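# Example calls (editor's sketch; the hint strings are illustrative):
def _demo_version_checks() -> None:
    require_version("python>=3.7")  # special-cased interpreter check
    require_version("packaging>=20.0", "Try: pip install -U packaging")  # versioned range check
    require_version_core("packaging")  # bare, non-versioned presence check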
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
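# Typical invocation (editor's note; `fire` maps CLI arguments onto the
# function parameters, and the module/file names here are placeholders):
#     python rouge_cli.py predictions.txt references.txt --save_path=metrics.json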
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    '''simple docstring'''
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
lowerCamelCase_ = input('''Enter numbers separated by comma:\n''').strip()
lowerCamelCase_ = [int(item.strip()) for item in user_input.split(''',''')]
lowerCamelCase_ = int(input('''Enter the number to be found in the list:\n''').strip())
lowerCamelCase_ = '''''' if binary_search(sequence, target) else '''not '''
print(f'{target} was {not_str}found in {sequence}')
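# An iterative sketch (editor's addition) that avoids the list copies made by
# the slicing in the recursive version above, keeping O(log n) time and O(1)
# extra space:
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False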
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    # Lagrange-style DP: answers[i] holds the minimum count of perfect squares
    # summing to i.
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
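# Worked example (editor's note): 12 = 4 + 4 + 4, so the DP above returns 3;
# 13 = 4 + 9 gives 2.
#     minimum_squares_to_represent_a_number(12)  # 3
#     minimum_squares_to_represent_a_number(13)  # 2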
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , 'w' ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , 'w' ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2048)
print('Key files generation successful' )
if __name__ == "__main__":
main()
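# A small companion sketch (editor's addition) for reading back the key files
# this module writes; the comma-separated format matches make_key_files above.
def read_key_file(key_file_path: str) -> tuple[int, ...]:
    with open(key_file_path) as fo:
        return tuple(int(part) for part in fo.read().split(','))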
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the block of text in `filename` between `start_prompt` and `end_prompt`."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCAmelCase__ = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase__ = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
UpperCAmelCase__ = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
UpperCAmelCase__ = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase identifier into its words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a cell of the given `width`."""
    text_length = 2 if text == '✅' or text == '❌' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"

    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe fix it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'), start_prompt='<!--This table is updated automatically from the auto modules', end_prompt='<!-- End table-->', )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.')
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
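# Typical invocations (editor's note), following the run instructions in the
# header comment of this script:
#     python utils/check_table.py                      # fail if the table is stale
#     python utils/check_table.py --fix_and_overwrite  # rewrite docs/source/en/index.md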
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin):
    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ', FutureWarning, )

            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
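# Usage sketch (editor's addition; the checkpoint name and the zero-filled
# array are illustrative):
#     import numpy as np
#     processor = WavaVecaProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = processor(audio=np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="HELLO WORLD").input_ids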
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    '''simple docstring'''
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
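# Example calls (editor's note):
#     snake_to_camel_case("some_random_string")                   # 'someRandomString'
#     snake_to_camel_case("some_random_string", use_pascal=True)  # 'SomeRandomString'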
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    def get_file_format(self, seed, shape):
        '''simple docstring'''
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        '''simple docstring'''
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        '''simple docstring'''
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str) -> dict:
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict: dict) -> dict:
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the model is a VQA model.")
    args = parser.parse_args()

    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
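    # Illustrative padding behavior for the method above: with padding_side="right"
    # and two positions of padding needed, a global attention mask [1, 0, 0] becomes
    # [1, 0, 0, -1, -1]; the -1 entries mark padded positions that receive neither
    # local nor global attention.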
| 286
| 0
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 255
|
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up dynamic programming match of input_string against pattern."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
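# For the demo below: "c*" matches the empty string and "a*" matches "aa",
# so match_pattern("aab", "c*a*b") evaluates to True.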
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 255
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
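# Minimal usage sketch (illustrative; assumes a local "data.parquet" file):
#
#     import datasets
#     ds = datasets.load_dataset("parquet", data_files="data.parquet", split="train")
#
# The builder reads each file in record batches of `batch_size` rows, so a large
# Parquet file never has to be fully materialized in memory at once.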
| 220
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
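# Usage sketch (illustrative; mirrors the checkpoint exercised in the slow test):
#
#     from transformers import pipeline
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="path/to/image.png", question="How many cats are there?", top_k=2)
#     # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]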
| 220
| 1
|
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force: for each element, scan the rest of the list."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but with enumerate/slicing instead of index math."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution: traverse from the right, keeping a stack of candidates."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
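# Note: the stack-based variant is O(n) overall (each element is pushed once and
# popped at most once), while the two nested-loop variants above are O(n^2) in
# the worst case.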
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 107
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
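    # Note: the `fairseq_offset` used above (1 for XLM-R) compensates for fairseq
    # placing <s>, <pad>, </s> and <unk> at the head of its vocabulary, which
    # shifts every raw SentencePiece piece id by one.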
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
# fmt: off
a = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 107
| 1
|
"""simple docstring"""
import requests
UpperCAmelCase: Dict = """YOUR API KEY"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase = giphy_api_key ):
_lowercase : Optional[Any] = """+""".join(query.split() )
_lowercase : str = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
_lowercase : Any = requests.get(__UpperCAmelCase ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 370
|
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
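# Example property: two module sources that differ only in comments or empty lines
# hash to the same value, so the cache key derived below stays stable across such edits.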
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
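# The lazy pattern above keeps `import transformers.models.layoutlmv2` cheap: the
# heavy torch / tokenizers / vision imports only happen when an attribute such as
# LayoutLMv2Model is first accessed through the _LazyModule proxy.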
| 201
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
snake_case_ = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
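# Launch-time note (illustrative): the multi-GPU path above expects the launcher to
# export WORLD_SIZE, N_GPU_NODE, RANK, N_NODES and NODE_RANK so that node ids and
# per-node ranks can be derived before torch.distributed is initialized.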
| 78
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_flip_channel_order'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 362
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
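# Example: the space byte (0x20) falls outside the printable ranges kept as-is, so
# it maps to chr(256 + 32) == "Ġ" -- which is why space-prefixed words appear as
# "Ġword" entries in GPT-2/BART vocabularies.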
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
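# Example: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.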
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
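    # Sketch of one bpe() iteration: for the word ("h", "u", "g") the candidate
    # pairs are ("h", "u") and ("u", "g"); the pair with the lowest merge rank is
    # fused, and the loop repeats until no remaining pair appears in bpe_ranks.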
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 236
| 0
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 68
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
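# Worked example (illustrative): with grid = np.array([[1, 1], [0, 1]]),
# source = (0, 0) and destination = (1, 1), the function returns
# (2.0, [(0, 0), (0, 1), (1, 1)]); cells equal to 0 act as walls.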
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286
| 0
|
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')

METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 1_000),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.00454, 264.172),
    'cubicyard': from_to(0.76455, 1.30795),
    'cubicfoot': from_to(0.028, 35.3147),
    'cup': from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid 'from_type' value: {from_type!r} Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid 'to_type' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
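# Worked example: volume_conversion(4, "cubicmeter", "litre") == 4000.0, since a
# cubic metre has from_ == 1 and a litre has to == 1000.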
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split('_')

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
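# Example: "swin_tiny_patch4_window7_224" parses to model_size="tiny",
# window_size=7 and img_size=224, yielding embed_dim=96 and depths=(2, 2, 6, 2).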
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name
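# Example: "layers.0.blocks.0.attn.proj.weight" is rewritten to
# "swin.encoder.layers.0.blocks.0.attention.output.dense.weight".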
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
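# A minimal follow-up sketch (paths are illustrative, not part of the original script):
# after conversion, the dump folder can be reloaded like any other checkpoint, e.g.
#
#     from transformers import AutoImageProcessor, SwinForImageClassification
#
#     model = SwinForImageClassification.from_pretrained("path/to/pytorch_dump_folder")
#     image_processor = AutoImageProcessor.from_pretrained("path/to/pytorch_dump_folder")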
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
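# Usage note (sketch): with the _LazyModule registration above, an import such as
#
#     from transformers.models.layoutlmv3 import LayoutLMv3Config
#
# stays cheap, because the heavy torch/tensorflow submodules are only imported on
# first attribute access rather than at package-import time.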
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
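# Minimal usage sketch (BertConfig is an assumed example, not part of this file):
#
#     tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#     tester.run_common_tests()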
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
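# Illustrative mappings (example keys, made up but following the rules above):
#   replace_key("prior.x_out.weight")   -> "prior.fc_proj_out.weight"
#   replace_key("vqvae.bottleneck.0.k") -> "vqvae.bottleneck.0.codebook"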
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name.split("/")[-1]]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
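# Example (illustrative): make_transparency_mask((64, 64), 8) yields a 64x64 uint8 array
# that is 255 in the 48x48 interior and ramps linearly down to 0 across the 8-pixel
# border band, so overlapping tiles blend smoothly when pasted.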
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
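# Worked example: next_divisible(77, 16) == 77 - (77 % 16) == 64,
# i.e. the largest multiple of 16 that does not exceed 77.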
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
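# Rough cost model implied by __call__ above: a WxH input becomes a 4W x 4H output,
# assembled from ceil(W / tile_size) * ceil(H / tile_size) upscale passes, one per tile.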
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Creates a state space tree and iterates through each branch using DFS,
    backtracking whenever the current path can no longer reach max_sum.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
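# Worked example: for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the script prints
# [3, 4, 2] [4, 5] -- the only two subsets that sum to 9.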
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
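# Minimal sketch of the same FaissIndex API outside the test harness (shapes are illustrative):
#
#     index = FaissIndex(string_factory="Flat")
#     index.add_vectors(np.random.rand(100, 5).astype(np.float32))
#     scores, ids = index.search(np.random.rand(5).astype(np.float32))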
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
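# Usage sketch (task name and paths are assumed examples): build a cached feature set for MRPC.
#
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
#     train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")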
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
assert parsed_imports == ["os"]
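# All ten cases above parse to ["os"]: get_imports keeps genuine top-level
# dependencies and drops imports guarded by try/except, which are treated as optional.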
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
def __init__( self ,A ,A ,A="<s>" ,A="</s>" ,A="</s>" ,A="<s>" ,A="<unk>" ,A="<pad>" ,A="<mask>" ,A = None ,**A ,):
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
UpperCAmelCase = vocab_file
UpperCAmelCase = monolingual_vocab_file
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase = {}
UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = cnt
cnt += 1
with open(A ,"""r""" ,encoding="""utf-8""" ) as f:
for line in f.readlines():
UpperCAmelCase = line.strip().split()[0]
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(A ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,A ):
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
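

# --- usage sketch (illustrative, not part of the original module) ------------
# A minimal, hedged example of driving the tokenizer above; it assumes the
# public "vinai/bartpho-syllable" checkpoint is reachable, so treat it as a
# sketch rather than a test.
def _bartpho_usage_sketch():
    tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
    encoding = tokenizer("Chúng tôi là những nghiên cứu viên.")
    # build_inputs_with_special_tokens wraps single sequences as <s> ... </s>
    assert encoding["input_ids"][0] == tokenizer.cls_token_id
    assert encoding["input_ids"][-1] == tokenizer.sep_token_id
    return tokenizer.decode(encoding["input_ids"], skip_special_tokens=True)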
| 234
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = {"input_ids": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : Optional[int] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(a , **a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(a , **a )
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_p.save_pretrained(a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Optional[int] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(a , a )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(a )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(a )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.save_pretrained(a , legacy_format=a )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.save_pretrained(a )
# Checks it save with the same files
self.assertSequenceEqual(a , a )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
shutil.rmtree(a )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.save_pretrained(a , legacy_format=a )
SCREAMING_SNAKE_CASE : str = tokenizer_p.save_pretrained(a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
shutil.rmtree(a )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/mbart-large-50-one-to-many-mmt'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_0038 )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
self.assertIn(a , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : Dict = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(a , skip_special_tokens=a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , a )
SCREAMING_SNAKE_CASE : Tuple = 10
SCREAMING_SNAKE_CASE : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[0] , a )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(a ) , a )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_0053, 25_0001] )
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
SCREAMING_SNAKE_CASE : int = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : int = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=10 , return_tensors="pt" )
SCREAMING_SNAKE_CASE : str = targets["input_ids"]
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(a ) , {
# en_XX, A, test, EOS
"input_ids": [[25_0004, 62, 3034, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_0001,
} , )
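

# --- usage sketch (illustrative, not part of the original tests) -------------
# The integration tests above revolve around this seq2seq encoding pattern; the
# checkpoint matches `checkpoint_name` above, but network access is assumed, so
# this is a sketch rather than an additional test.
def _mbart50_translation_sketch():
    tokenizer = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tokenizer(
        [" UN Chief Says There Is No Military Solution in Syria"],
        text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
        return_tensors="pt",
    )
    # source ids start with the en_XX language code and end with </s> (id 2)
    return batch["input_ids"], batch["labels"]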
| 76
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
))
# 3 convs
for i in range(3):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
])
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
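

# --- usage sketch (illustrative, not part of the original script) ------------
# Typical invocation; the script filename and output path are hypothetical:
#
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50
#
# The dumped folder can then be reloaded with the standard from_pretrained API:
def _load_converted_detr(folder="./detr-resnet-50"):
    model = DetrForObjectDetection.from_pretrained(folder)
    processor = DetrImageProcessor.from_pretrained(folder)
    return model, processor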
| 76
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated negative flags onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
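

# --- usage sketch (illustrative, not part of the original file) --------------
# How the benchmark runner consumes these arguments: build the dataclass, then
# execute model calls under the resolved distribution strategy. `models` is a
# field inherited from BenchmarkArguments; the model name is an assumption.
def _benchmark_args_sketch():
    args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], device_idx=0)
    with args.strategy.scope():
        pass  # build and call the TF model here; placement follows the strategy
    return args.n_gpu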
| 173
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=1000,
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="linear",
        trained_betas=None,
        prediction_type="epsilon",
        timestep_spacing="linspace",
        steps_offset=0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
lowercase : Any = num_inference_steps
lowercase : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowercase : Dict = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowercase : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase : Union[str, Any] = (np.arange(0 , SCREAMING_SNAKE_CASE__ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase : Dict = (np.arange(SCREAMING_SNAKE_CASE__ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE__ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
lowercase : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowercase : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = np.interp(SCREAMING_SNAKE_CASE__ , np.arange(0 , len(SCREAMING_SNAKE_CASE__ ) ) , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowercase : str = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ )
# interpolate sigmas
lowercase : int = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
lowercase : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
lowercase : Optional[int] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
# mps does not support float64
lowercase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ , dtype=torch.floataa )
else:
lowercase : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
# interpolate timesteps
lowercase : Any = self.sigma_to_t(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ , dtype=timesteps.dtype )
lowercase : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
lowercase : Dict = torch.cat([timesteps[:1], interleaved_timesteps] )
lowercase : int = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowercase : Dict = defaultdict(SCREAMING_SNAKE_CASE__ )
    def sigma_to_t(self, sigma):
# get log sigma
lowercase : Any = sigma.log()
# get distribution
lowercase : Optional[Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowercase : List[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
lowercase : str = low_idx + 1
lowercase : Union[str, Any] = self.log_sigmas[low_idx]
lowercase : Union[str, Any] = self.log_sigmas[high_idx]
# interpolate sigmas
lowercase : Dict = (low - log_sigma) / (low - high)
lowercase : Union[str, Any] = w.clamp(0 , 1 )
# transform interpolation to time range
lowercase : List[str] = (1 - w) * low_idx + w * high_idx
lowercase : Tuple = t.view(sigma.shape )
return t
@property
    def state_in_first_order(self):
return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
lowercase : Optional[Any] = self.index_for_timestep(SCREAMING_SNAKE_CASE__ )
# advance index counter by 1
lowercase : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowercase : Union[str, Any] = self.sigmas[step_index]
lowercase : List[Any] = self.sigmas_interpol[step_index + 1]
lowercase : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowercase : Any = self.sigmas[step_index - 1]
lowercase : List[Any] = self.sigmas_interpol[step_index]
lowercase : Optional[int] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowercase : Union[str, Any] = 0
lowercase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowercase : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowercase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase : Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowercase : Union[str, Any] = sigma_interpol - sigma_hat
# store for 2nd order step
lowercase : Optional[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowercase : str = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowercase : str = sigma_next - sigma_hat
lowercase : List[str] = self.sample
lowercase : Optional[int] = None
lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__ )
    def add_noise(self, original_samples, noise, timesteps):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE__ ):
# mps does not support float64
lowercase : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowercase : List[Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowercase : List[str] = self.timesteps.to(original_samples.device )
lowercase : Any = timesteps.to(original_samples.device )
lowercase : Tuple = [self.index_for_timestep(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for t in timesteps]
lowercase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowercase : Any = sigma.unsqueeze(-1 )
lowercase : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
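

# --- usage sketch (illustrative, not part of the original file) --------------
# The scheduler follows the standard diffusers sampling API; `unet` and
# `latents` below are placeholders supplied by the caller.
def _kdpm2_sampling_sketch(unet, latents, num_inference_steps=25):
    scheduler = KDPM2DiscreteScheduler(beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps, device=latents.device)
    latents = latents * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(latents, t)
        noise_pred = unet(model_input, t).sample
        latents = scheduler.step(noise_pred, t, latents).prev_sample
    return latents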
| 173
| 1
|
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.metric_id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
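

# --- usage note (illustrative, not part of the original test) ----------------
# Minimal self-contained demonstration of the `pytest.warns` pattern the test
# relies on; the warning text here is made up.
import warnings


def test_pytest_warns_pattern():
    with pytest.warns(FutureWarning, match="moved to evaluate"):
        warnings.warn("metrics moved to evaluate", FutureWarning)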
| 312
|
"""simple docstring"""
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
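

# --- usage sketch (illustrative, not part of the original file) --------------
# Tables like this are typically read back as pinned requirement strings; a
# minimal helper sketch (the helper name is an assumption, not upstream API):
def _pinned_requirement(pkg):
    if pkg not in deps:
        raise ValueError(f"{pkg} is not tracked in the dependency table")
    return deps[pkg]  # e.g. "tokenizers>=0.11.1,!=0.11.3,<0.14"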
| 40
| 0
|
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", neg)
    waitKey(0)
    destroyAllWindows()
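

# --- design note (illustrative, not part of the original file) ---------------
# The nested Python loop above touches every pixel from the interpreter; since
# `imread` already returns a NumPy array, the same negative can be computed in
# one vectorized expression:
def convert_to_negative_vectorized(img):
    return 255 - img  # broadcasts over all pixels and channels at once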
| 351
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
snake_case : Union[str, Any] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
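A minimal sketch of driving the converter from Python instead of the CLI; both default paths below are placeholders for a locally available fairseq-style WavLM checkpoint, not files shipped with this script.

def _demo_convert(checkpoint_path="./WavLM-Base.pt", dump_folder="./wavlm-base-converted"):
    """Run the conversion end to end; the default paths are illustrative only."""
    # config_path=None falls back to the default WavLMConfig defined in transformers
    convert_wavlm_checkpoint(checkpoint_path, dump_folder, config_path=None)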
| 109
| 0
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
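A small construction sketch: the wrapper simply fans the same latents out to every child ControlNet and sums the residuals in `forward` above. The repo paths below are placeholders; any two compatible `ControlNetModel` checkpoints work.

def _demo_multi_controlnet():
    # placeholder checkpoint locations, not real repo ids
    cn_a = ControlNetModel.from_pretrained("path/or/repo-to-controlnet-a")
    cn_b = ControlNetModel.from_pretrained("path/or/repo-to-controlnet-b")
    # each net later receives its own conditioning image and scale
    return MultiControlNetModel([cn_a, cn_b])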
| 42
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,),
        layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,),
        layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
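A quick sanity-check sketch of the quantizer's straight-through behavior on random activations; the sizes are arbitrary and the helper is only defined, never run on import.

def _demo_vector_quantizer():
    # 8 codes of dim 4 over a (1, 4, 2, 2) feature map; beta is the usual commitment weight
    vq = VectorQuantizer(n_e=8, vq_embed_dim=4, beta=0.25)
    z = torch.randn(1, 4, 2, 2, requires_grad=True)
    z_q, loss, (_, _, indices) = vq(z)
    # straight-through: z_q keeps z's shape and gradient path
    assert z_q.shape == z.shape
    return loss, indices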
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
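A minimal sketch of the reparameterized sampling and KL terms above; the tensor sizes are arbitrary and chosen only to satisfy the mean/logvar split along dim 1.

def _demo_gaussian_posterior():
    # 2 * latent_channels along dim 1: first half is the mean, second half the logvar
    params = torch.randn(1, 8, 4, 4)  # -> 4 latent channels
    dist = DiagonalGaussianDistribution(params)
    latent = dist.sample()  # reparameterized draw, shape (1, 4, 4, 4)
    kl = dist.kl()          # KL against a standard normal, one value per batch element
    return latent, kl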
| 42
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 350
|
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 236
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
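As a sanity check of the renaming rule in the weights loop above, a small sketch that applies the same first-component remapping to one example key; the key string is made up for illustration.

def _demo_rename(param_key="downsample_blocks.0.resnets.0.conv1.weight"):
    # mirrors the loop above: only the first dotted component is remapped
    renames = {"time_steps": "time_proj", "mid": "mid_block",
               "downsample_blocks": "down_blocks", "upsample_blocks": "up_blocks"}
    head, *rest = param_key.split(".")
    return ".".join([renames.get(head, head), *rest])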
| 167
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 233
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9,
        is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1,
        initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0,
        scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
        head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def get_config(self):
        return T5Config(vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 185
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
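A worked call on a small strictly diagonally dominant system; the values are chosen for illustration, and a handful of iterations already brings the estimate close to the true solution.

def _demo_jacobi():
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    init_val = [0.5, -0.5, -0.5]
    # each diagonal entry dominates its row, so the iteration converges
    return jacobi_iteration_method(coefficient, constant, init_val, iterations=3)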
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_UpperCAmelCase = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_UpperCAmelCase = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_UpperCAmelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 173
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True,
        decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
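A short usage sketch: overriding a single field while keeping the ViT-base style defaults defined above. The helper is only defined, never run on import.

def _demo_config():
    # mask_ratio controls the fraction of patches hidden during MAE pre-training
    config = ViTMAEConfig(mask_ratio=0.6)
    return config.mask_ratio  # 0.6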
| 368
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs_passed(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 90
| 0
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(self, parent, batch_size: int = 13, image_size: int = 64, patch_size: int = 2, embed_dim: int = 3, num_channels: int = 3, is_training: bool = True, use_labels: bool = True, hidden_size: int = 128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers: int = 7, num_attention_heads: int = 4, intermediate_size: int = 37, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, type_sequence_label_size: int = 10, initializer_range: float = 0.02, encoder_stride: int = 2, num_attention_outputs: int = 1, dim: int = 128, depths: List[int] = [2, 2, 2, 2], resolution: int = 2, mlp_expansion_ratio: int = 2) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 14
|
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
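# memo maps digitsum(b) -> {c: [(diff, terms_jumped, k), ...]}: cached "jumps" that
# let next_term() skip long stretches of the sequence a(i+1) = a(i) + digitsum(a(i))
# instead of recomputing it term by term.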
def next_term(a_i, k, i, n):
    # ds_b -> digitsum of the high-order digits b, c -> value of the low-order digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array given in digits, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109
| 0
|
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid function of a float, or its derivative when deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
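# Note: with deriv=True the argument is assumed to already be a sigmoid output,
# since d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).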
# Initial Value
UpperCAmelCase: Tuple = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
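# With enough propagations the returned value converges towards `expected`,
# since the weight is nudged by the error delta on every iteration.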
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase: Optional[Any] = int(input("""Expected value: """))
UpperCAmelCase: Any = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 361
|
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")
if "large" in checkpoint_url:
lowercase :Tuple = 1024
lowercase :Tuple = 4096
lowercase :Union[str, Any] = 24
lowercase :Tuple = 16
lowercase :Optional[Any] = [5, 11, 17, 23]
lowercase :Optional[Any] = [256, 512, 1024, 1024]
lowercase :Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowercase :Any = 768
lowercase :Optional[int] = [1, 1, 1, 0.5]
lowercase :int = [256, 512, 768, 768]
lowercase :str = 150
lowercase :Tuple = 16
lowercase :List[str] = (1, 384, 384)
lowercase :Any = False
lowercase :Optional[int] = "project"
if "ade" in checkpoint_url:
lowercase :Dict = True
lowercase :str = 768
lowercase :Tuple = [1, 1, 1, 0.5]
lowercase :Dict = 150
lowercase :Any = 16
lowercase :List[Any] = "huggingface/label-files"
lowercase :Union[str, Any] = "ade20k-id2label.json"
lowercase :List[str] = json.load(open(cached_download(hf_hub_url(lowerCamelCase, lowerCamelCase, repo_type="dataset" ) ), "r" ) )
lowercase :Optional[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowercase :Union[str, Any] = idalabel
lowercase :Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase :List[str] = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase :Any = name.replace("pretrained.model", "dpt.encoder" )
if "pretrained.model" in name:
lowercase :int = name.replace("pretrained.model", "dpt.embeddings" )
if "patch_embed" in name:
lowercase :Any = name.replace("patch_embed", "" )
if "pos_embed" in name:
lowercase :Optional[Any] = name.replace("pos_embed", "position_embeddings" )
if "attn.proj" in name:
lowercase :List[str] = name.replace("attn.proj", "attention.output.dense" )
if "proj" in name and "project" not in name:
lowercase :str = name.replace("proj", "projection" )
if "blocks" in name:
lowercase :List[Any] = name.replace("blocks", "layer" )
if "mlp.fc1" in name:
lowercase :str = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
lowercase :Any = name.replace("mlp.fc2", "output.dense" )
if "norm1" in name and "backbone" not in name:
lowercase :Any = name.replace("norm1", "layernorm_before" )
if "norm2" in name and "backbone" not in name:
lowercase :Any = name.replace("norm2", "layernorm_after" )
if "scratch.output_conv" in name:
lowercase :Optional[Any] = name.replace("scratch.output_conv", "head" )
if "scratch" in name:
lowercase :Union[str, Any] = name.replace("scratch", "neck" )
if "layer1_rn" in name:
lowercase :str = name.replace("layer1_rn", "convs.0" )
if "layer2_rn" in name:
lowercase :Dict = name.replace("layer2_rn", "convs.1" )
if "layer3_rn" in name:
lowercase :Tuple = name.replace("layer3_rn", "convs.2" )
if "layer4_rn" in name:
lowercase :Optional[int] = name.replace("layer4_rn", "convs.3" )
if "refinenet" in name:
lowercase :Any = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase :Any = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
lowercase :str = name.replace("out_conv", "projection" )
if "resConfUnit1" in name:
lowercase :Optional[Any] = name.replace("resConfUnit1", "residual_layer1" )
if "resConfUnit2" in name:
lowercase :int = name.replace("resConfUnit2", "residual_layer2" )
if "conv1" in name:
lowercase :Optional[int] = name.replace("conv1", "convolution1" )
if "conv2" in name:
lowercase :List[str] = name.replace("conv2", "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase :Tuple = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase :Any = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase :Optional[Any] = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
lowercase :str = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
lowercase :str = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
lowercase :Any = name.replace("pretrained", "dpt" )
if "bn" in name:
lowercase :Optional[int] = name.replace("bn", "batch_norm" )
if "head" in name:
lowercase :Union[str, Any] = name.replace("head", "head.head" )
if "encoder.norm" in name:
lowercase :Optional[Any] = name.replace("encoder.norm", "layernorm" )
if "auxlayer" in name:
lowercase :str = name.replace("auxlayer", "auxiliary_head.head" )
if "backbone" in name:
lowercase :List[str] = name.replace("backbone", "backbone.bit.encoder" )
if ".." in name:
lowercase :Optional[int] = name.replace("..", "." )
if "stem.conv" in name:
lowercase :Union[str, Any] = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
lowercase :List[str] = name.replace("blocks", "layers" )
if "convolution" in name and "backbone" in name:
lowercase :int = name.replace("convolution", "conv" )
if "layer" in name and "backbone" in name:
lowercase :List[str] = name.replace("layer", "layers" )
if "backbone.bit.encoder.bit" in name:
lowercase :Dict = name.replace("backbone.bit.encoder.bit", "backbone.bit" )
if "embedder.conv" in name:
lowercase :Dict = name.replace("embedder.conv", "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
lowercase :str = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" )
return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_UpperCAmelCase : List[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 236
| 0
|
'''simple docstring'''
import datasets
__a = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
__a = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
__a = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 43
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 43
| 1
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
def lowercase_ ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : List[Any] = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__lowerCamelCase : Tuple = Path(SCREAMING_SNAKE_CASE_ ) / 'preprocessor_config.json'
__lowerCamelCase : Tuple = Path(SCREAMING_SNAKE_CASE_ ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(SCREAMING_SNAKE_CASE_ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(SCREAMING_SNAKE_CASE_ , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowerCamelCase : Dict = AutoImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ).to_dict()
config_dict.pop('image_processor_type' )
__lowerCamelCase : Optional[int] = CLIPImageProcessor(**SCREAMING_SNAKE_CASE_ )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = AutoImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# make sure private variable is not incorrectly saved
__lowerCamelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : List[Any] = Path(SCREAMING_SNAKE_CASE_ ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(SCREAMING_SNAKE_CASE_ , 'w' ) , )
__lowerCamelCase : str = AutoImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
def lowercase_ ( self ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : str = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = AutoImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def lowercase_ ( self ) -> Dict:
try:
AutoConfig.register('custom' , SCREAMING_SNAKE_CASE_ )
AutoImageProcessor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
AutoImageProcessor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : int = Path(SCREAMING_SNAKE_CASE_ ) / 'preprocessor_config.json'
__lowerCamelCase : List[str] = Path(SCREAMING_SNAKE_CASE_ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(SCREAMING_SNAKE_CASE_ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(SCREAMING_SNAKE_CASE_ , 'w' ) )
__lowerCamelCase : List[str] = CustomImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self ) -> Any:
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = True
try:
AutoConfig.register('custom' , SCREAMING_SNAKE_CASE_ )
AutoImageProcessor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# If remote code is not set, the default is to use local
__lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase : List[Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(SCREAMING_SNAKE_CASE_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 185
|
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
if padding_needed:
# The padding that will be added later
__lowerCamelCase : int = B'=' * ((6 - len(UpperCAmelCase_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase_ ) % 6)
else:
__lowerCamelCase : str = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase_ ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__lowerCamelCase : Dict = (
'argument should be a bytes-like object or ASCII string, '
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(UpperCAmelCase_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
try:
__lowerCamelCase : int = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
__lowerCamelCase : Union[str, Any] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__lowerCamelCase : Any = encoded_data[:-padding]
__lowerCamelCase : Optional[Any] = ''.join(
bin(B64_CHARSET.index(UpperCAmelCase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__lowerCamelCase : Any = ''.join(
bin(B64_CHARSET.index(UpperCAmelCase_ ) )[2:].zfill(6 ) for char in encoded_data )
__lowerCamelCase : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase_ ) , 8 )
]
return bytes(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
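# A quick round-trip sanity check (a sketch; it assumes the functions above and
# cross-checks them against the standard library's base64 module):
#
#     >>> from base64 import b64encode
#     >>> base64_encode(b"Hello World!")
#     b'SGVsbG8gV29ybGQh'
#     >>> base64_encode(b"Hello World!") == b64encode(b"Hello World!")
#     True
#     >>> base64_decode("SGVsbG8gV29ybGQh")
#     b'Hello World!'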
"""Archimedes' principle: buoyant force on a submerged object."""

g = 9.80665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force F = fluid_density * gravity * volume, in newtons."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
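# A worked example (a sketch): a 0.5 m^3 object fully submerged in fresh water
# (density ~1000 kg/m^3) experiences a buoyant force of
# 1000 * 9.80665 * 0.5 = 4903.325 N.
#
#     >>> archimedes_principle(fluid_density=1000, volume=0.5)
#     4903.325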
"""Tests for the text-classification tool."""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
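# A minimal usage sketch for the tokenizer exercised above (downloads the
# microsoft/deberta-base vocabulary on first use):
#
#     from transformers import DebertaTokenizer
#
#     tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#     encoding = tokenizer("Hello", "World")
#     print(encoding["input_ids"], encoding["token_type_ids"])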
from math import pi, sqrt, tan

def surface_area_cube(side_length: float) -> float:
    """Calculate the surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the surface area of a hemisphere (curved surface plus circular base)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the surface area of a torus: 4 * pi^2 * R * r."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Calculate the area of a triangle from its three sides using Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon with the given number of sides."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
"""Tests for the zero-shot object detection pipeline."""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
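# A minimal sketch of how the pipeline under test is used outside the test suite
# (the image URL is the COCO sample exercised above; the exact scores and boxes
# depend on the default checkpoint the pipeline resolves to):
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection")
#     for det in detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote", "couch"],
#         threshold=0.2,
#     ):
#         print(det["label"], det["score"], det["box"])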
"""Convert SwitchTransformers checkpoints from the original T5X repository to PyTorch."""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : Optional[int] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : Optional[int] = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase : List[str] = s_dict[key].shape[0]
UpperCAmelCase : List[Any] = s_dict[key]
for idx in range(UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(UpperCAmelCase_ )
return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
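# An example invocation (a sketch; the script filename and all paths are
# placeholders for a real T5X SwitchTransformers checkpoint and gin config):
#
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin \
#       --pytorch_dump_folder_path ./switch-base-converted \
#       --num_experts 8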